Merge remote-tracking branch 'upstream/master'
This commit is contained in: commit 18476fe1b9

.ci/openvino-onnx/Jenkinsfile (vendored): 4 lines changed
@@ -113,8 +113,8 @@ def buildDockerImage(Map configuration, String workdir) {
                --build-arg BUILD_TYPE=${configuration.build_type} \
                --build-arg PROTOBUF_LITE=${configuration.protobuf_lite} \
                --file=.ci/openvino-onnx/Dockerfile \
-               --build-arg http_proxy=http://proxy-ir.intel.com:911/ \
-               --build-arg https_proxy=http://proxy-ir.intel.com:911/ .
+               --build-arg http_proxy=${HTTP_PROXY} \
+               --build-arg https_proxy=${HTTPS_PROXY} .
    """
}
@@ -10,8 +10,11 @@ Standard Caffe\* layers:
| BN | No |
| BatchNorm | No |
| Bias | No |
| Binarization (Intel experimental) | No |
| Concat | No |
| Convolution | No |
| ConvolutionBinary | No |
| Crop | No |
| Deconvolution | No |
| DetectionOutput | No |
| Dropout | Not needed for inference |
@@ -21,14 +24,25 @@ Standard Caffe\* layers:
| InnerProduct | No |
| Input | No |
| LRN | No |
| Normalize | No |
| Python | Supported only for the Python Proposal operation |
| Permute | No |
| Pooling | No |
| Power | No |
| PReLU | No |
| PriorBox | No |
| PriorBoxClustered | No |
| Proposal | No |
| PSROIPooling | No |
| ROIPooling | No |
| RegionYolo | No |
| ReorgYolo | No |
| ReLU | No |
| Resample | No |
| Reshape | No |
| Scale | No |
| ShuffleChannel | No |
| Sigmoid | No |
| Slice | No |
| Softmax | No |
| Tile | No |
@@ -41,31 +55,44 @@ Standard MXNet\* symbols:

| Symbol Name in MXNet\*| Limitations|
| :----------| :----------|
| _Plus | No |
| _contrib_box_nms | No |
| _contrib_DeformableConvolution | No |
| _contrib_DeformablePSROIPooling | No |
| _contrib_MultiBoxDetection | "force_suppress" = 1 is not supported, non-default variances are not supported |
| _contrib_MultiBoxPrior | No |
| _contrib_Proposal | No |
| _copy | Not needed for inference |
| _div_scalar | No |
| _greater_scalar | No |
| _minus_scalar | No |
| _mul_scalar | No |
| _plus_scalar | No |
| _rnn_param_concat | No |
| _arange | No |
| _contrib_AdaptiveAvgPooling2D | Converted to the Average Pooling with fixed paddings |
| _maximum | No |
| _minimum | No |
| _np_roll | No |
| _zeros | No |
| add_n | No |
| arccosh | No |
| arcsinh | No |
| arctanh | No |
| broadcast_add | No |
| broadcast_div | No |
| broadcast_mul | No |
| broadcast_sub | No |
| BlockGrad | No |
| cumsum | No |
| div_scalar | No |
| elementwise_sub | No |
| elemwise_add | No |
| elemwise_mul | No |
| elemwise_sub | No |
| exp | No |
| expand_dims | No |
| greater_scalar | No |
| max | No |
| minus_scalar | No |
| null | Not needed for inference |
| repeat | No |
@@ -74,9 +101,11 @@ Standard MXNet\* symbols:
| round | No |
| sigmoid | No |
| slice | No |
| SliceChannel | No |
| slice_axis | No |
| slice_channel | No |
| slice_like | No |
| softmax | No |
| stack | No |
| swapaxis | No |
| tile | No |
@@ -100,6 +129,7 @@ Standard MXNet\* symbols:
| L2Normalization | only 4D input is supported |
| LRN | No |
| LeakyReLU | supported "act_type" = "prelu", "elu", "leaky", "gelu" |
| ones_like | No |
| Pad | No |
| Pooling | No |
| ROIPooling | No |
@@ -113,6 +143,7 @@ Standard MXNet\* symbols:
| Tile | No |
| UpSampling | No |
| Where | No |
| zeros_like | No |

## TensorFlow\* Supported Operations

@@ -123,18 +154,27 @@ Standard TensorFlow\* operations:

| Operation Name in TensorFlow\* | Limitations|
| :----------| :----------|
| Abs | No |
| Acosh | No |
| Add | No |
| AddV2 | No |
| AddN | No |
| All | No |
| ArgMax | No |
| ArgMin | No |
| Asinh | No |
| Assert | Not needed for inference |
| Assign | Not needed for inference |
| AssignSub | Not needed for inference |
| Atanh | No |
| AvgPool | No |
| AvgPoolV2 | Supported only for constant-foldable kernel_size and strides inputs |
| AvgPool3D | No |
| BatchMatMul | No |
| BatchMatMulV2 | No |
| BatchToSpaceND | No |
| BiasAdd | No |
| BlockLSTM | No |
| Bucketize | CPU only |
| BroadcastTo | No |
| Cast | No |
@@ -144,14 +184,21 @@ Standard TensorFlow\* operations:
| Const | No |
| Conv2D | No |
| Conv2DBackpropInput | No |
| Conv3D | No |
| Conv3DBackpropInputV2 | No |
| Cos | No |
| Cosh | No |
| CropAndResize | "method" = "bilinear" only |
| CTCGreedyDecoder | Supported only with decoded indices output in a dense format |
| CTCLoss | Supported only with decoded indices input in a dense format |
| CumSum | No |
| DepthToSpace | No |
| DepthwiseConv2dNative | No |
| Einsum | Supported only with equation that does not contain repeated labels within a subscript |
| Elu | No |
| Enter | Supported only when it is fused to the TensorIterator layer |
| Equal | No |
| Erf | No |
| Exit | Supported only when it is fused to the TensorIterator layer |
| Exp | No |
| ExpandDims | No |
@@ -163,34 +210,43 @@ Standard TensorFlow\* operations:
| FFT | Supported only when it is part of a sub-graph of the special form |
| FFT2D | Supported only when it is part of a sub-graph of the special form |
| FFT3D | Supported only when it is part of a sub-graph of the special form |
| FIFOQueueV2 | Supported only when it is part of a sub-graph of the special form |
| Fill | No |
| Floor | No |
| FloorDiv | No |
| FloorMod | No |
| FusedBatchNorm | No |
| FusedBatchNormV2 | No |
| FusedBatchNormV3 | No |
| Gather | No |
| GatherNd | No |
| GatherTree | No |
| GatherV2 | No |
| Greater | No |
| GreaterEqual | No |
| Identity | Not needed for shape inference |
| IdentityN | No |
| IFFT | Supported only when it is part of a sub-graph of the special form |
| IFFT2D | Supported only when it is part of a sub-graph of the special form |
| IFFT3D | Supported only when it is part of a sub-graph of the special form |
| IteratorGetNext | Supported only when it is part of a sub-graph of the special form |
| LRN | No |
| LeakyRelu | No |
| Less | No |
| LessEqual | No |
| Log | No |
| Log1p | No |
| LogicalAnd | No |
| LogicalOr | No |
| LogicalNot | No |
| LogSoftmax | No |
| LookupTableInsertV2 | Supported only when it is part of a sub-graph of the special form |
| LoopCond | Supported only when it is fused to the TensorIterator layer |
| MatMul | No |
| Max | No |
| MaxPool | No |
| MaxPoolV2 | Supported only for constant-foldable kernel_size and strides inputs |
| MaxPool3D | No |
| Maximum | No |
| Mean | No |
| Merge | Supported only when it is fused to the TensorIterator layer |
@@ -200,9 +256,11 @@ Standard TensorFlow\* operations:
| Mul | No |
| Neg | No |
| NextIteration | Supported only when it is fused to the TensorIterator layer |
| NonMaxSuppressionV2 | No |
| NonMaxSuppressionV3 | No |
| NonMaxSuppressionV4 | No |
| NonMaxSuppressionV5 | No |
| NotEqual | No |
| NoOp | No |
| OneHot | No |
| Pack | No |
@@ -211,9 +269,11 @@ Standard TensorFlow\* operations:
| Placeholder | No |
| PlaceholderWithDefault | No |
| Prod | No |
| QueueDequeueUpToV2 | Supported only when it is part of a sub-graph of the special form |
| Range | No |
| Rank | No |
| RealDiv | No |
| Reciprocal | No |
| Relu | No |
| Relu6 | No |
| Reshape | No |
@@ -221,9 +281,12 @@ Standard TensorFlow\* operations:
| ResizeNearestNeighbor | No |
| ResourceGather | No |
| ReverseSequence | No |
| ReverseV2 | Supported only when it can be converted to the ReverseSequence operation |
| Roll | No |
| Round | No |
| Pow | No |
| Rsqrt | No |
| Select | No |
| Shape | No |
| Sigmoid | No |
| Sin | No |
@@ -234,6 +297,10 @@ Standard TensorFlow\* operations:
| Softplus | No |
| Softsign | No |
| SpaceToBatchND | No |
| SpaceToDepth | No |
| SparseFillEmptyRows | Supported only when it is part of a sub-graph of the special form |
| SparseReshape | Supported only when it is part of a sub-graph of the special form |
| SparseSegmentSum | Supported only when it is part of a sub-graph of the special form |
| SparseToDense | CPU only |
| Split | No |
| SplitV | No |
@@ -242,11 +309,13 @@ Standard TensorFlow\* operations:
| SquaredDifference | No |
| Square | No |
| Squeeze | The case when squeeze axis is not specified is not supported |
| StatelessWhile | No |
| StopGradient | Not needed for shape inference |
| StridedSlice | Supported only for constant-foldable begin, end, and strides inputs |
| Sub | No |
| Sum | No |
| Swish | No |
| swish_f32 | No |
| Switch | Control flow propagation |
| Tan | No |
| Tanh | No |
@@ -260,7 +329,9 @@ Standard TensorFlow\* operations:
| TopkV2 | No |
| Transpose | No |
| Unpack | No |
| Where | No |
| Variable | No |
| VariableV2 | No |
| Where | Supported only when it is part of a sub-graph of the special form |
| ZerosLike | No |

@@ -356,13 +427,15 @@ Standard Kaldi\* Layers:
| :----------| :----------|
| addshift | No |
| affinecomponent | No |
| affinecomponentpreconditionedonline | No |
| affinetransform | No |
| backproptruncationcomponent | No |
| batchnormcomponent | No |
| clipgradientcomponent | Not needed for inference |
| concat | No |
| convolutional1dcomponent | No |
| convolutionalcomponent | No |
| copy | No |
| Crop | No |
| elementwiseproductcomponent | No |
| fixedaffinecomponent | No |
| fixedbiascomponent | No |
@@ -383,9 +456,9 @@ Standard Kaldi\* Layers:
| rectifiedlinearcomponent | No |
| rescale | No |
| sigmoid | No |
| sigmoidcomponent | No |
| softmax | No |
| softmaxComponent | No |
| softsign | No |
| specaugmenttimemaskcomponent | Not needed for inference |
| splicecomponent | No |
| tanhcomponent | No |
@@ -404,12 +477,14 @@ Standard ONNX\* operators:
| Acosh | No |
| Add | No |
| Affine | No |
| And | No |
| ArgMax | No |
| ArgMin | No |
| Asin | No |
| Asinh | No |
| Atan | No |
| Atanh | No |
| ATen | Supported only for the 'embedding_bag' operator |
| AveragePool | No |
| BatchMatMul | No |
| BatchNormalization | No |
@@ -426,6 +501,7 @@ Standard ONNX\* operators:
| Cosh | No |
| Crop | No |
| CumSum | No |
| DepthToSpace | No |
| DequantizeLinear | No |
| DetectionOutput (Intel experimental) | No |
| Div | No |
@@ -433,7 +509,14 @@ Standard ONNX\* operators:
| Elu | No |
| Equal | No |
| Erf | No |
| Exp | No |
| Expand | No |
| ExperimentalDetectronDetectionOutput (Intel experimental) | No |
| ExperimentalDetectronGenerateProposalsSingleImage (Intel experimental) | No |
| ExperimentalDetectronGroupNorm (Intel experimental) | No |
| ExperimentalDetectronPriorGridGenerator (Intel experimental) | No |
| ExperimentalDetectronROIFeatureExtractor (Intel experimental) | No |
| ExperimentalDetectronTopKROIs (Intel experimental) | No |
| FakeQuantize (Intel experimental) | No |
| Fill | No |
| Flatten | No |
@@ -451,6 +534,7 @@ Standard ONNX\* operators:
| HardSigmoid | No |
| Identity | Not needed for inference |
| ImageScaler | No |
| InstanceNormalization | No |
| LRN | No |
| LSTM | Peepholes are not supported |
| LeakyRelu | No |
@@ -461,7 +545,9 @@ Standard ONNX\* operators:
| LogicalOr | No |
| LogSoftmax | No |
| Loop | No |
| LpNormalization | No |
| MatMul | No |
| Max | No |
| MaxPool | No |
| MeanVarianceNormalization | Reduction over the batch dimension is not supported, reduction over all dimensions except batch and channel ones is obligatory |
| Min | No |
@@ -475,6 +561,7 @@ Standard ONNX\* operators:
| Pad | No |
| Pow | No |
| PriorBox (Intel experimental) | No |
| PriorBoxClustered | No |
| QuantizeLinear | No |
| RNN | No |
| ROIAlign | No |
@@ -506,6 +593,7 @@ Standard ONNX\* operators:
| Softplus | No |
| Softsign | No |
| SpaceToDepth | No |
| Split | No |
| Sqrt | No |
| Squeeze | The case when squeeze axis is not specified is not supported |
| Sub | No |

@@ -2,35 +2,45 @@

**Versioned name**: *ReverseSequence-1*

-**Category**: data movement operation
+**Category**: *Data movement*

**Short description**: *ReverseSequence* reverses variable length slices of data.

-**Detailed description**: *ReverseSequence* slices input along the dimension specified in the *batch_axis*, and for each slice *i*, reverses the first *lengths[i]* (the second input) elements along the dimension specified in the *seq_axis*.
+**Detailed description**
+
+*ReverseSequence* slices a given input tensor `data` along the dimension specified in the *batch_axis* attribute. For each slice `i`, it reverses the first `seq_lengths[i]` elements along the dimension specified in the *seq_axis* attribute.
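
To make the new wording concrete, here is a minimal NumPy sketch of the semantics it defines (the `reverse_sequence` helper is illustrative only, not part of the specification):

```python
import numpy as np

def reverse_sequence(data, seq_lengths, batch_axis=0, seq_axis=1):
    out = np.array(data, copy=True)
    # View the array with batch_axis first and seq_axis second; writes
    # through this view mutate `out`.
    view = np.moveaxis(out, [batch_axis, seq_axis], [0, 1])
    for i, n in enumerate(seq_lengths):
        # Reverse the first n elements of slice i along the sequence axis.
        view[i, :n] = view[i, :n][::-1].copy()
    return out

data = np.arange(12).reshape(3, 4)   # batch_axis=0, seq_axis=1
print(reverse_sequence(data, seq_lengths=[1, 2, 4]))
# [[ 0  1  2  3]    length 1: nothing to reverse
#  [ 5  4  6  7]    first 2 elements reversed
#  [11 10  9  8]]   all 4 elements reversed
```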

**Attributes**

* *batch_axis*

-   * **Description**: *batch_axis* is the index of the batch dimension.
-   * **Range of values**: an integer. Can be negative.
+   * **Description**: *batch_axis* is the index of the batch dimension along which the `data` input tensor is sliced.
+   * **Range of values**: an integer within the range `[-rank(data), rank(data) - 1]`
    * **Type**: `int`
-   * **Default value**: 0
+   * **Default value**: `0`
    * **Required**: *no*

* *seq_axis*

-   * **Description**: *seq_axis* is the index of the sequence dimension.
-   * **Range of values**: an integer. Can be negative.
+   * **Description**: *seq_axis* is the index of the sequence dimension along which elements of the `data` input tensor are reversed.
+   * **Range of values**: an integer within the range `[-rank(data), rank(data) - 1]`
    * **Type**: `int`
-   * **Default value**: 1
+   * **Default value**: `1`
    * **Required**: *no*

-**Inputs**:
+**Inputs**

-* **1**: tensor with input data to reverse. **Required.**
+* **1**: `data` - Input data to reverse. A tensor of type *T1* and rank greater than or equal to 2. **Required.**
+* **2**: `seq_lengths` - Sequence lengths to reverse in the input tensor `data`. A 1D tensor comprising `data_shape[batch_axis]` elements of type *T2*. All element values must be integers within the range `[1, data_shape[seq_axis]]`. A value of `1` means no elements are reversed. **Required.**
-* **2**: 1D tensor populated with integers with sequence lengths in the 1st input tensor. **Required.**

**Outputs**

* **1**: The result of slicing and reversing the `data` input tensor. A tensor of type *T1* with the same shape as the `data` input tensor.

**Types**

* *T1*: any supported type.
* *T2*: any supported numerical type.

**Example**

@@ -38,19 +48,19 @@
<layer ... type="ReverseSequence">
    <data batch_axis="0" seq_axis="1"/>
    <input>
-       <port id="0">
-           <dim>3</dim>
-           <dim>10</dim>
+       <port id="0">   <!-- data -->
+           <dim>4</dim>    <!-- batch_axis -->
+           <dim>10</dim>   <!-- seq_axis -->
            <dim>100</dim>
            <dim>200</dim>
        </port>
        <port id="1">
-           <dim>3</dim>
+           <dim>4</dim>    <!-- seq_lengths value: [2, 4, 8, 10] -->
        </port>
    </input>
    <output>
        <port id="2">
-           <dim>3</dim>
+           <dim>4</dim>
            <dim>10</dim>
            <dim>100</dim>
            <dim>200</dim>
docs/template_plugin/tests/functional/op_reference/cum_sum.cpp (new file): 193 lines
@@ -0,0 +1,193 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <tuple>

#include "base_reference_test.hpp"

using namespace reference_tests;
using namespace ngraph;
using namespace InferenceEngine;

namespace {
struct CumSumParams {
    // Custom axis input and attributes
    template <class IT, class AT>
    CumSumParams(const PartialShape& shape, const element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues, const bool exclusive,
                 const bool reverse, const element::Type& axisType, AT axisVal, const PartialShape& axisShape)
        : exclusive(exclusive),
          reverse(reverse),
          axisValue(axisVal),
          axisShape(axisShape),
          inShape(shape),
          axisType(axisType),
          inType(iType),
          outType(iType),
          axisData(CreateBlob(axisType, std::vector<AT> {axisVal})),
          inputData(CreateBlob(iType, iValues)),
          refData(CreateBlob(iType, oValues)),
          testDefaults(false) {}

    // Default axis input and attributes
    template <class IT>
    CumSumParams(const PartialShape& shape, const element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
        : inShape(shape),
          axisType(element::i32),
          inType(iType),
          outType(iType),
          inputData(CreateBlob(iType, iValues)),
          refData(CreateBlob(iType, oValues)),
          testDefaults(true) {}

    bool exclusive = false;
    bool reverse = false;
    int64_t axisValue = 0;

    PartialShape axisShape;
    PartialShape inShape;
    element::Type axisType;
    element::Type inType;
    element::Type outType;
    Blob::Ptr axisData;
    Blob::Ptr inputData;
    Blob::Ptr refData;

    bool testDefaults = false;
};

class ReferenceCumSumLayerTest : public testing::TestWithParam<CumSumParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        if (params.testDefaults) {
            function = CreateFunction(params.inShape, params.inType);
            inputData = {params.inputData};
            refOutData = {params.refData};
        } else {
            function = CreateFunction(params.inShape, params.inType, params.axisShape, params.axisType, params.exclusive, params.reverse);
            inputData = {params.inputData, params.axisData};
            refOutData = {params.refData};
        }
    }
    static std::string getTestCaseName(const testing::TestParamInfo<CumSumParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "testDefaults=" << param.testDefaults << "_";
        result << "axisValue=" << param.axisValue << "_";
        result << "exclusive=" << param.exclusive << "_";
        result << "reverse=" << param.reverse << "_";
        result << "inShape=" << param.inShape << "_";
        result << "iType=" << param.inType << "_";
        result << "axisType=" << param.axisType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& data_shape, const element::Type& data_type, const PartialShape& axis_shape,
                                                    const element::Type& axis_type, const bool exclusive, const bool reverse) {
        const auto data_param = std::make_shared<op::Parameter>(data_type, data_shape);
        const auto axis_param = std::make_shared<op::Parameter>(axis_type, axis_shape);
        const auto cum_sum = std::make_shared<op::v0::CumSum>(data_param, axis_param, exclusive, reverse);
        return std::make_shared<Function>(NodeVector {cum_sum}, ParameterVector {data_param, axis_param});
    }

    static std::shared_ptr<Function> CreateFunction(const PartialShape& data_shape, const element::Type& data_type) {
        const auto data_param = std::make_shared<op::Parameter>(data_type, data_shape);
        const auto cum_sum = std::make_shared<op::v0::CumSum>(data_param);
        return std::make_shared<Function>(NodeVector {cum_sum}, ParameterVector {data_param});
    }
};

TEST_P(ReferenceCumSumLayerTest, CompareWithHardcodedRefs) {
    Exec();
}

template <element::Type_t IN_ET>
std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
    using T = typename element_type_traits<IN_ET>::value_type;
    std::vector<CumSumParams> opParams {
        // Default axis input and attributes
        CumSumParams(PartialShape {1}, type, std::vector<T> {3}, std::vector<T> {3}),
        CumSumParams(PartialShape {6}, type, std::vector<T> {1, 2, 3, 4, 5, 6}, std::vector<T> {1, 3, 6, 10, 15, 21}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {0, 1, 2, 3, 4, 6, 8, 10}),
        // Custom axis input and attributes
        CumSumParams(PartialShape {6}, type, std::vector<T> {1, 2, 3, 4, 5, 6}, std::vector<T> {1, 3, 6, 10, 15, 21}, false, false, element::i32, int32_t(0),
                     PartialShape {}),  // axis i32
        CumSumParams(PartialShape {6}, type, std::vector<T> {1, 2, 3, 4, 5, 6}, std::vector<T> {1, 3, 6, 10, 15, 21}, false, false, element::i64, int64_t(0),
                     PartialShape {}),  // axis i64
        CumSumParams(PartialShape {6}, type, std::vector<T> {1, 2, 3, 4, 5, 6}, std::vector<T> {21, 20, 18, 15, 11, 6}, false, true, element::i64, int64_t(0),
                     PartialShape {}),
        CumSumParams(PartialShape {6}, type, std::vector<T> {1, 2, 3, 4, 5, 6}, std::vector<T> {0, 1, 3, 6, 10, 15}, true, false, element::i64, int64_t(0),
                     PartialShape {}),
        CumSumParams(PartialShape {6}, type, std::vector<T> {1, 2, 3, 4, 5, 6}, std::vector<T> {20, 18, 15, 11, 6, 0}, true, true, element::i64, int64_t(0),
                     PartialShape {}),

        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {0, 1, 2, 3, 4, 6, 8, 10}, false, false, element::i32,
                     int32_t(0), PartialShape {}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {4, 6, 8, 10, 4, 5, 6, 7}, false, true, element::i32,
                     int32_t(0), PartialShape {}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {0, 0, 0, 0, 0, 1, 2, 3}, true, false, element::i32,
                     int32_t(0), PartialShape {}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {4, 5, 6, 7, 0, 0, 0, 0}, true, true, element::i32,
                     int32_t(0), PartialShape {}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {0, 1, 3, 6, 4, 9, 15, 22}, false, false, element::i32,
                     int32_t(1), PartialShape {}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {0, 0, 1, 3, 0, 4, 9, 15}, true, false, element::i32,
                     int32_t(1), PartialShape {}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {6, 6, 5, 3, 22, 18, 13, 7}, false, true, element::i32,
                     int32_t(1), PartialShape {}),
        CumSumParams(PartialShape {2, 4}, type, std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7}, std::vector<T> {6, 5, 3, 0, 18, 13, 7, 0}, true, true, element::i32,
                     int32_t(1), PartialShape {}),

        CumSumParams(PartialShape {3, 2, 4}, type,
                     std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15,
                                     16, 17, 18, 19, 20, 21, 22, 23},
                     std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 10, 12, 14, 16, 18, 20, 22,
                                     24, 27, 30, 33, 36, 39, 42, 45},
                     false, false, element::i32, int32_t(0), PartialShape {}),
        CumSumParams(PartialShape {3, 2, 4}, type,
                     std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15,
                                     16, 17, 18, 19, 20, 21, 22, 23},
                     std::vector<T> {0, 1, 2, 3, 4, 6, 8, 10,
                                     8, 9, 10, 11, 20, 22, 24, 26,
                                     16, 17, 18, 19, 36, 38, 40, 42},
                     false, false, element::i32, int32_t(1), PartialShape {}),
        CumSumParams(PartialShape {3, 2, 4}, type,
                     std::vector<T> {0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15,
                                     16, 17, 18, 19, 20, 21, 22, 23},
                     std::vector<T> {0, 1, 3, 6, 4, 9, 15, 22,
                                     8, 17, 27, 38, 12, 25, 39, 54,
                                     16, 33, 51, 70, 20, 41, 63, 86},
                     false, false, element::i32, int32_t(2), PartialShape {}),
    };
    return opParams;
}

std::vector<CumSumParams> generateCumSumCombinedParams() {
    const std::vector<std::vector<CumSumParams>> opTypeParams {
        generateCumSumParams<element::Type_t::bf16>(element::bf16), generateCumSumParams<element::Type_t::f16>(element::f16),
        generateCumSumParams<element::Type_t::f32>(element::f32), generateCumSumParams<element::Type_t::i32>(element::i32),
        generateCumSumParams<element::Type_t::i64>(element::i64), generateCumSumParams<element::Type_t::u32>(element::u32),
        generateCumSumParams<element::Type_t::i8>(element::i8)};
    std::vector<CumSumParams> combinedParams;
    std::for_each(opTypeParams.begin(), opTypeParams.end(), [&](std::vector<CumSumParams> params) {
        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
    });
    return combinedParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_CumSum_With_Hardcoded_Refs, ReferenceCumSumLayerTest, ::testing::ValuesIn(generateCumSumCombinedParams()),
                         ReferenceCumSumLayerTest::getTestCaseName);
} // namespace
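
As a cross-check of the hardcoded reference vectors above, here is a short NumPy model of CumSum's `exclusive` and `reverse` attributes (illustrative only; the test suite relies purely on its hardcoded values):

```python
import numpy as np

def cumsum_ref(x, axis=0, exclusive=False, reverse=False):
    x = np.asarray(x)
    if reverse:
        x = np.flip(x, axis)
    out = np.cumsum(x, axis)
    if exclusive:
        out = out - x  # drop the current element from each running sum
    if reverse:
        out = np.flip(out, axis)
    return out

x = np.arange(8).reshape(2, 4)
print(cumsum_ref(x, axis=1, exclusive=True, reverse=True).ravel())
# [ 6  5  3  0 18 13  7  0]  matches the exclusive+reverse case above
```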

@@ -26,6 +26,7 @@ public:
    bool transform(TransformationContext& context, ngraph::pattern::Matcher &m) override;
    bool isPrecisionPreserved(std::shared_ptr<Node> layer) const noexcept override;
    bool canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> layer) const override;
+   static bool isQuantizedStatic(const std::shared_ptr<const Node>& layer) noexcept;

protected:
    static bool isHandled(

@@ -297,6 +297,22 @@ bool ConcatTransformation::isHandled(const TransformationContext& context, const
    return false;
}

+bool ConcatTransformation::isQuantizedStatic(const std::shared_ptr<const Node>& layer) noexcept {
+    const auto concat = as_type_ptr<const opset1::Concat>(layer);
+    if (concat == nullptr) {
+        return false;
+    }
+
+    const auto axis = concat->get_axis();
+    const auto outputRank = concat->get_output_partial_shape(0).rank();
+    if (axis < 0 && outputRank.is_dynamic()) {
+        return false;
+    }
+
+    const size_t normalizedAxis = ngraph::normalize_axis(concat->get_friendly_name(), axis, outputRank);
+    return normalizedAxis == 1ul;
+}

} // namespace low_precision
} // namespace pass
} // namespace ngraph
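
The `normalizedAxis == 1` check above hinges on axis normalization. A plain-Python illustration of the usual negative-axis convention (assumed to match what `ngraph::normalize_axis` computes for valid inputs):

```python
def normalize_axis(axis: int, rank: int) -> int:
    # Map a possibly negative axis into [0, rank).
    return axis if axis >= 0 else axis + rank

assert normalize_axis(1, 4) == 1    # channel axis of an NCHW tensor
assert normalize_axis(-3, 4) == 1   # the same axis, given negatively
# isQuantizedStatic() accepts the concat only when this result is 1.
```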

@@ -7,6 +7,7 @@
#include <memory>

+#include <ngraph/opsets/opset1.hpp>
#include "low_precision/concat.hpp"
#include "low_precision/convolution.hpp"
#include "low_precision/convolution_backprop_data.hpp"
#include "low_precision/group_convolution.hpp"
@@ -54,6 +55,12 @@ bool ngraph::pass::low_precision::MarkupCanBeQuantized::run_on_function(std::sha
            }
            continue;
        }
+       if (const auto concat = std::dynamic_pointer_cast<ngraph::opset1::Concat>(node)) {
+           if (!ConcatTransformation::isQuantizedStatic(concat)) {
+               setEmptyPrecisions(concat);
+           }
+           continue;
+       }
    }
    return true;
}
@@ -230,16 +230,18 @@ TEST_P(ConcatTransformation, CompareFunctions) {
    auto res = compare_functions(referenceFunction, actualFunction, true, true, false, true, false);
    ASSERT_TRUE(res.first) << res.second;

-   const auto actualFakeQuantizes = LayerTransformation::get<opset1::FakeQuantize>(actualFunction);
-   ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<std::shared_ptr<PrecisionsAttribute>>(actualFakeQuantizes)) <<
-       "PrecisionsAttribute are not the same";
-
    ConcatTransformationTestValues testValues = std::get<2>(GetParam());
-   if (testValues.checkIntervalsAlignmentAttributes) {
-       auto operations = LayerTransformation::get<opset1::Concat>(actualFunction);
-       operations.insert(operations.end(), actualFakeQuantizes.begin(), actualFakeQuantizes.end());
-       ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<std::shared_ptr<IntervalsAlignmentAttribute>>(operations)) <<
-           "IntervalsAlignmentAttribute are not the same";
+   const auto actualFakeQuantizes = LayerTransformation::get<opset1::FakeQuantize>(actualFunction);
+   if (testValues.axis == 1) {
+       ASSERT_TRUE(checkIfOutputAttributesSharedValuesAreTheSame<std::shared_ptr<PrecisionsAttribute>>(actualFakeQuantizes)) <<
+           "PrecisionsAttribute are not the same";
+
+       if (testValues.checkIntervalsAlignmentAttributes) {
+           auto operations = LayerTransformation::get<opset1::Concat>(actualFunction);
+           operations.insert(operations.end(), actualFakeQuantizes.begin(), actualFakeQuantizes.end());
+           ASSERT_TRUE(checkIfAttributesSharedValuesAreTheSame<std::shared_ptr<IntervalsAlignmentAttribute>>(operations)) <<
+               "IntervalsAlignmentAttribute are not the same";
+       }
    }
}

@@ -11,11 +11,9 @@
#include <gtest/gtest.h>

#include <transformations/utils/utils.hpp>
#include <transformations/init_node_info.hpp>
#include <low_precision/concat.hpp>
#include <low_precision/fake_quantize_decomposition.hpp>
#include <low_precision/max_pool.hpp>
#include <low_precision/clamp.hpp>

#include "common_test_utils/ngraph_test_utils.hpp"
#include "lpt_ngraph_functions/concat_function.hpp"
@@ -45,7 +43,8 @@ public:
    ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize1;
    ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize2;
    ngraph::element::Type precisionBeforeOp;
-   ngraph::builder::subgraph::DequantizationOperations dequantizationBefore;
+   ngraph::builder::subgraph::DequantizationOperations dequantizationBefore1;
+   ngraph::builder::subgraph::DequantizationOperations dequantizationBefore2;
    ngraph::element::Type precisionAfterOperation;
    ngraph::builder::subgraph::DequantizationOperations dequantizationAfter1;
    ngraph::builder::subgraph::DequantizationOperations dequantizationAfter2;
@@ -63,6 +62,7 @@ class ConcatTransformationTestValues {
public:
    TestTransformationParams params;
    bool multiChannels;
+   std::int64_t axis;
    ConcatTransformationActualValues actual;
    ConcatTransformationResultValues result;
};
@@ -87,6 +87,7 @@ public:
        actualFunction = ngraph::builder::subgraph::ConcatFunction::getOriginalWithDifferentPrecisionOnChildren(
            precision,
            inputShape,
+           testValues.axis,
            testValues.actual.fakeQuantize1,
            testValues.actual.fakeQuantize2);

@@ -100,17 +101,18 @@ public:
        transform.add<ngraph::pass::low_precision::ConcatTransformation, ngraph::opset1::Concat>(testValues.params);
        transform.add<ngraph::pass::low_precision::FakeQuantizeDecompositionTransformation, ngraph::opset1::FakeQuantize>(testValues.params);
        transform.add<ngraph::pass::low_precision::MaxPoolTransformation, ngraph::opset1::MaxPool>(testValues.params);
        transform.add<ngraph::pass::low_precision::ClampTransformation, ngraph::opset1::Clamp>(testValues.params);
        transform.transform(actualFunction);

        referenceFunction = ngraph::builder::subgraph::ConcatFunction::getReferenceWithDifferentPrecisionOnChildren(
            precision,
            inputShape,
            testValues.multiChannels,
+           testValues.axis,
            testValues.result.fakeQuantize1,
            testValues.result.fakeQuantize2,
            testValues.result.precisionBeforeOp,
-           testValues.result.dequantizationBefore,
+           testValues.result.dequantizationBefore1,
+           testValues.result.dequantizationBefore2,
            testValues.result.precisionAfterOperation,
            testValues.result.dequantizationAfter1,
            testValues.result.dequantizationAfter2);
@@ -153,6 +155,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
    {
        LayerTransformation::createParamsU8I8(),
        false,
+       1,
        {
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} }
@@ -162,15 +165,37 @@ const std::vector<ConcatTransformationTestValues> testValues = {
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, { 128.f} },
            ngraph::element::u8,
            {{}, {}, {}},
+           {{}, {}, {}},
            ngraph::element::u8,
            { ngraph::element::f32, {}, { 0.01f } },
            { ngraph::element::f32, {}, { 0.01f } }
        }
    },
+   // U8 with unsupported axis
+   {
+       LayerTransformation::createParamsU8I8(),
+       false,
+       2,
+       {
+           { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
+           { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} }
+       },
+       {
+           { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f} },
+           { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {255.f} },
+           ngraph::element::u8,
+           {{ngraph::element::f32}, {}, {0.01f}},
+           {{ngraph::element::f32}, {}, {0.005f}},
+           ngraph::element::f32,
+           {{}, {}, {}},
+           {{}, {}, {}}
+       }
+   },
    // I8
    {
        LayerTransformation::createParamsI8I8(),
        false,
+       1,
        {
            { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
            { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-1.28f / 2.f}, {1.27f / 2.f} }
@@ -180,6 +205,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
            { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-64.f}, { 64.f} },
            ngraph::element::i8,
            {{}, {}, {}},
+           {{}, {}, {}},
            ngraph::element::i8,
            { ngraph::element::f32, {}, { 0.01f } },
            { ngraph::element::f32, {}, { 0.01f } }
@@ -189,6 +215,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
    {
        LayerTransformation::createParamsU8I8(),
        true,
+       1,
        {
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} }
@@ -198,6 +225,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, { 255.f} },
            ngraph::element::u8,
            {{}, {}, {}},
+           {{}, {}, {}},
            ngraph::element::u8,
            { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} },
            { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} },
@@ -207,6 +235,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
    {
        LayerTransformation::createParamsI8I8(),
        true,
+       1,
        {
            { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
            { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-1.28f / 2.f}, {1.27f / 2.f} }
@@ -216,6 +245,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
            { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-128.f}, {127.f} },
            ngraph::element::i8,
            {{}, {}, {}},
+           {{}, {}, {}},
            ngraph::element::i8,
            { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} },
            { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} },
@@ -225,6 +255,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
    {
        LayerTransformation::createParamsU8I8().setUpdatePrecisions(false),
        false,
+       1,
        {
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, {2.55f / 2.f} }
@@ -234,6 +265,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
            { 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, { 128.f} },
            ngraph::element::f32,
            {{}, {}, {}},
+           {{}, {}, {}},
            ngraph::element::f32,
            { {}, {}, { 0.01f } },
            { {}, {}, { 0.01f } }
@@ -0,0 +1,37 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "layer_transformation.hpp"

#include <gtest/gtest.h>

#include "lpt_ngraph_functions/concat_function.hpp"
#include "simple_low_precision_transformer.hpp"

using namespace ::testing;

class smoke_LPT_ConcatWithUnsupportedAxis : public Test {};

TEST_F(smoke_LPT_ConcatWithUnsupportedAxis, rtInfoCheck) {
    using namespace ngraph::builder::subgraph;

    const ngraph::element::Type precision = ngraph::element::f32;
    const ngraph::PartialShape inputPShape = PartialShape{ 1, 3, 16, 16 };
    const std::int64_t unsupportedAxis = 2;
    const auto fakeQuantize = FakeQuantizeOnData{ 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} };

    std::shared_ptr<ngraph::Function> function = ConcatFunction::getOriginalWithDifferentPrecisionOnChildren(
        precision,
        inputPShape,
        unsupportedAxis,
        fakeQuantize,
        fakeQuantize);

    SimpleLowPrecisionTransformer transformer;
    transformer.transform(function);

    const auto actualConcat = LayerTransformation::get<opset1::Concat>(function)[0];
    const auto& rtInfo = actualConcat->get_rt_info();
    ASSERT_TRUE(rtInfo.empty()) << "Unsupported concat mustn't contain LPT runtime attributes";
}
@@ -1,9 +1,3 @@
# OMZ:
# Hang:
.*AvgPool_1199829.*
.*AvgPool_1201153.*
.*ROIPooling_1199827.*

# DLB:
# Hang:
.*Convolution_1301086.*
@@ -16,42 +16,48 @@ const std::vector<ngraph::element::Type> netPrecisions = {
};

const std::vector<ngraph::pass::low_precision::LayerTransformation::Params> transformationParamValues = {
    // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8(),
    LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8()
};

const std::vector<ConcatWithDifferentChildrenTransformationParam> testValues = {
    // U8
    {
+       1,
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }
    },
+   // U8 and unsupported concat axis
+   {
+       2,
+       { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
+       { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }
+   },
    // I8
    {
+       1,
        { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
        { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} }
    },
    // mixed: U8 + I8
    {
+       1,
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
        { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }
    },
    // mixed: I8 + U8
    {
+       1,
        { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }
    }
};

-const std::vector<bool> multiChannel = { true/*, false*/ };

INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithDifferentChildrenTransformation,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(ngraph::PartialShape({ 1, 6, 10, 10 })),
        ::testing::Values(CommonTestUtils::DEVICE_CPU),
        ::testing::ValuesIn(testValues),
-       ::testing::ValuesIn(transformationParamValues),
-       ::testing::ValuesIn(multiChannel)),
+       ::testing::ValuesIn(transformationParamValues)),
    ConcatWithDifferentChildrenTransformation::getTestCaseName);
} // namespace
@@ -16,42 +16,48 @@ const std::vector<ngraph::element::Type> netPrecisions = {
};

const std::vector<ngraph::pass::low_precision::LayerTransformation::Params> transformationParamValues = {
    // LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8(),
    LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8()
};

const std::vector<ConcatWithDifferentChildrenTransformationParam> testValues = {
    // U8
    {
+       1,
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }
    },
+   // U8 and unsupported concat axis
+   {
+       2,
+       { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
+       { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }
+   },
    // I8
    {
-       { 256ul, ngraph::Shape({}), {-128.f}, {127.f}, {-1.28f}, {1.27f} },
-       { 256ul, ngraph::Shape({}), {-128.f}, {127.f}, {-1.28f / 2}, {1.27f / 2} }
+       1,
+       { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
+       { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f / 2.f}, {1.27f / 2.f} }
    },
    // mixed: U8 + I8
    {
+       1,
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
        { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }
    },
    // mixed: I8 + U8
    {
+       1,
        { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
        { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }
    }
};

-const std::vector<bool> multiChannel = { true/*, false*/ };

INSTANTIATE_TEST_SUITE_P(smoke_LPT, ConcatWithDifferentChildrenTransformation,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(ngraph::PartialShape({ 1, 3, 10, 10 })),
        ::testing::Values(CommonTestUtils::DEVICE_GPU),
        ::testing::ValuesIn(testValues),
-       ::testing::ValuesIn(transformationParamValues),
-       ::testing::ValuesIn(multiChannel)),
+       ::testing::ValuesIn(transformationParamValues)),
    ConcatWithDifferentChildrenTransformation::getTestCaseName);
} // namespace
@@ -13,6 +13,7 @@
namespace LayerTestsDefinitions {
class ConcatWithDifferentChildrenTransformationParam {
public:
+   std::int64_t axis;
    ngraph::builder::subgraph::FakeQuantizeOnData fqOnData1;
    ngraph::builder::subgraph::FakeQuantizeOnData fqOnData2;
};
@@ -22,9 +23,8 @@ typedef std::tuple<
    ngraph::PartialShape,
    std::string, // target device: CPU, GPU
    ConcatWithDifferentChildrenTransformationParam,
-   ngraph::pass::low_precision::LayerTransformation::Params, // transformation parameters
-   // multichannel
-   bool> ConcatWithDifferentChildrenTransformationParams;
+   ngraph::pass::low_precision::LayerTransformation::Params // transformation parameters
+   > ConcatWithDifferentChildrenTransformationParams;

class ConcatWithDifferentChildrenTransformation :
    public testing::WithParamInterface<ConcatWithDifferentChildrenTransformationParams>,
@@ -25,13 +25,12 @@ std::string ConcatWithDifferentChildrenTransformation::getTestCaseName(testing::
    std::string targetDevice;
    ConcatWithDifferentChildrenTransformationParam param;
    ngraph::pass::low_precision::LayerTransformation::Params params;
-   bool multiChannel;
-   std::tie(netPrecision, inputShapes, targetDevice, param, params, multiChannel) = obj.param;
+   std::tie(netPrecision, inputShapes, targetDevice, param, params) = obj.param;

    std::ostringstream result;
    result <<
        getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params) <<
-       (multiChannel ? "_multichannel" : "") << param.fqOnData1 << param.fqOnData2;
+       "_axis_" << param.axis << "_" << param.fqOnData1 << param.fqOnData2;

    return result.str();
}
@@ -42,8 +41,7 @@ InferenceEngine::Blob::Ptr ConcatWithDifferentChildrenTransformation::GenerateIn
    std::string targetDevice;
    ConcatWithDifferentChildrenTransformationParam param;
    ngraph::pass::low_precision::LayerTransformation::Params params;
-   bool multiChannel;
-   std::tie(netPrecision, inputShapes, targetDevice, param, params, multiChannel) = this->GetParam();
+   std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam();

    const float k = (info.name() == "input1") ? 1.f : (info.name() == "input2" ? 2.f : 3.f);
    return LayerTransformation::GenerateInput(ngraph::element::u8, info.getTensorDesc(), k);
@@ -54,11 +52,10 @@ void ConcatWithDifferentChildrenTransformation::SetUp() {
    ngraph::PartialShape inputShapes;
    ConcatWithDifferentChildrenTransformationParam param;
    ngraph::pass::low_precision::LayerTransformation::Params params;
-   bool multiChannel;
-   std::tie(netPrecision, inputShapes, targetDevice, param, params, multiChannel) = this->GetParam();
+   std::tie(netPrecision, inputShapes, targetDevice, param, params) = this->GetParam();

    function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithDifferentPrecisionOnChildren(
-       netPrecision, inputShapes, param.fqOnData1, param.fqOnData2);
+       netPrecision, inputShapes, param.axis, param.fqOnData1, param.fqOnData2);
}

TEST_P(ConcatWithDifferentChildrenTransformation, CompareWithRefImpl) {
@@ -30,17 +30,12 @@ void CumSumLayerTest::SetUp() {
    bool exclusive, reverse;
    int64_t axis;
    std::tie(inputShapes, inputPrecision, axis, exclusive, reverse, targetDevice) = this->GetParam();
-   auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-   ngraph::ParameterVector paramVector;
-   auto paramData = std::make_shared<ngraph::opset1::Parameter>(inType, ngraph::Shape(inputShapes));
-   paramVector.push_back(paramData);
+   const auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
+   const auto paramData = std::make_shared<ngraph::op::Parameter>(inType, ngraph::Shape(inputShapes));
+   const auto axisNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector<int64_t>{axis})->output(0);
+   const auto cumSum = std::make_shared<ngraph::op::v0::CumSum>(paramData, axisNode, exclusive, reverse);

-   auto axisNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector<int64_t>{axis})->output(0);
-
-   auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramVector));
-   auto cumSum = std::dynamic_pointer_cast<ngraph::op::CumSum>(ngraph::builder::makeCumSum(paramOuts[0], axisNode, exclusive, reverse));
-
-   ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(cumSum)};
-   function = std::make_shared<ngraph::Function>(results, paramVector, "cumsum");
+   ngraph::ResultVector results{std::make_shared<ngraph::op::Result>(cumSum)};
+   function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{paramData}, "cumsum");
}
} // namespace LayerTestsDefinitions
@@ -82,6 +82,7 @@ public:
    static std::shared_ptr<ngraph::Function> getOriginalWithDifferentPrecisionOnChildren(
        const ngraph::element::Type precision,
        const ngraph::PartialShape& inputShape,
+       const std::int64_t axis,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2);

@@ -229,10 +230,12 @@ public:
        const ngraph::element::Type precision,
        const ngraph::PartialShape& inputShape,
        const bool multiChannel,
+       const std::int64_t axis,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const ngraph::element::Type precisionBeforeOp,
-       const DequantizationOperations& dequantizationBefore,
+       const DequantizationOperations& dequantizationBefore1,
+       const DequantizationOperations& dequantizationBefore2,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationAfter1,
        const DequantizationOperations& dequantizationAfter2);
@@ -600,6 +600,7 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getOriginalWithStridedSlice(
std::shared_ptr<ngraph::Function> ConcatFunction::getOriginalWithDifferentPrecisionOnChildren(
    const ngraph::element::Type precision,
    const ngraph::PartialShape& inputShape,
+   const std::int64_t axis,
    const FakeQuantizeOnData& fqOnData1,
    const FakeQuantizeOnData& fqOnData2) {
    const auto input1 = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
@@ -610,11 +611,7 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getOriginalWithDifferentPrecis
    input2->set_friendly_name("input2");
    const auto fakeQuantize2 = makeFakeQuantize(input2, precision, fqOnData2);

-   const std::shared_ptr<ngraph::opset1::Concat> concat = std::make_shared<ngraph::opset1::Concat>(
-       ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0) }, 1);
-
-   auto& rtInfo = concat->get_rt_info();
-   rtInfo["Variant::std::string"] = std::make_shared<VariantWrapper<std::string>>("concat");
+   const auto concat = std::make_shared<opset1::Concat>(OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0) }, axis);

    const std::vector<size_t> kernel = { 3, 3 };
    const std::vector<size_t> stride = { 1, 1 };
@@ -1687,10 +1684,12 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getReferenceWithDifferentPreci
    const ngraph::element::Type precision,
    const ngraph::PartialShape& inputShape,
    const bool multiChannel,
+   const std::int64_t axis,
    const FakeQuantizeOnData& fqOnData1,
    const FakeQuantizeOnData& fqOnData2,
    const ngraph::element::Type precisionBeforeOp,
-   const DequantizationOperations& dequantizationBefore,
+   const DequantizationOperations& dequantizationBefore1,
+   const DequantizationOperations& dequantizationBefore2,
    const ngraph::element::Type precisionAfterOperation,
    const DequantizationOperations& dequantizationAfter1,
    const DequantizationOperations& dequantizationAfter2) {
@@ -1700,7 +1699,7 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getReferenceWithDifferentPreci
    const auto fakeQuantize1 = makeFakeQuantizeTypeRelaxed(input1, precision, fqOnData1);
    low_precision::NetworkHelper::setOutDataPrecisionForTypeRelaxed(fakeQuantize1, precisionBeforeOp);
    fakeQuantize1->set_friendly_name("fakeQuantize1");
-   const auto deqBefore1 = makeDequantization(fakeQuantize1, dequantizationBefore);
+   const auto deqBefore1 = makeDequantization(fakeQuantize1, dequantizationBefore1);

    const auto input2 = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
    input2->set_friendly_name("input2");
@@ -1708,16 +1707,12 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getReferenceWithDifferentPreci
    const auto fakeQuantize2 = makeFakeQuantizeTypeRelaxed(input2, precision, fqOnData2);
    low_precision::NetworkHelper::setOutDataPrecisionForTypeRelaxed(fakeQuantize2, precisionBeforeOp);
    fakeQuantize2->set_friendly_name("fakeQuantize2");
-   const auto deqBefore2 = makeDequantization(fakeQuantize2, dequantizationBefore);
+   const auto deqBefore2 = makeDequantization(fakeQuantize2, dequantizationBefore2);

-   const std::shared_ptr<ngraph::opset1::Concat> concat = std::make_shared<ngraph::opset1::Concat>(
-       ngraph::OutputVector{ deqBefore1, deqBefore2 }, 1);
+   const auto concat = std::make_shared<opset1::Concat>(OutputVector{ deqBefore1, deqBefore2 }, axis);
    low_precision::NetworkHelper::setOutDataPrecision(concat, precisionAfterOperation);
    concat->set_friendly_name("concat");

-   auto& rtInfo = concat->get_rt_info();
-   rtInfo["Variant::std::string"] = std::make_shared<VariantWrapper<std::string>>("concat");
-
    const auto lastDequantization1 = makeDequantization(concat->output(0), dequantizationAfter1);

    const std::vector<size_t> kernel = { 3, 3 };
@@ -1741,20 +1736,18 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getReferenceWithDifferentPreci
    ngraph::ResultVector results;
    results.push_back(std::make_shared<ngraph::opset1::Result>(avgPool));

-   if (!dequantizationAfter2.empty()) {
-       const std::shared_ptr<ngraph::opset1::MaxPool> maxPool = std::make_shared<ngraph::opset1::MaxPool>(
-           concat->output(0),
-           stride,
-           padBegin,
-           padEnd,
-           kernel,
-           roundingType,
-           padType);
+   const std::shared_ptr<ngraph::opset1::MaxPool> maxPool = std::make_shared<ngraph::opset1::MaxPool>(
+       concat->output(0),
+       stride,
+       padBegin,
+       padEnd,
+       kernel,
+       roundingType,
+       padType);

-       const std::shared_ptr<ngraph::Node> lastDequantization2 = makeDequantization(maxPool, dequantizationAfter2);
-       lastDequantization2->set_friendly_name("MaxPool");
-       results.push_back(std::make_shared<ngraph::opset1::Result>(lastDequantization2));
-   }
+   const std::shared_ptr<ngraph::Node> lastDequantization2 = makeDequantization(maxPool, dequantizationAfter2);
+   lastDequantization2->set_friendly_name("MaxPool");
+   results.push_back(std::make_shared<ngraph::opset1::Result>(lastDequantization2));

    std::shared_ptr<ngraph::Function> function = std::make_shared<ngraph::Function>(
        results,
@ -22,10 +22,14 @@ class MarkSubGraphsWithCorrectLayout(MiddleReplacementPattern):
1. Prevents adding Transpose operations before and after "reinterp_shape"-like operations, which change the rank of
the input and output tensors of this layout-agnostic op.
2. Disables attribute permutation for all intermediate ops between these "reinterp_shape" nodes.
3. Marks nodes along the weight path of convolutions as in correct layout to not permute them from NHWC to NCHW
3. Marks nodes along the weight path of convolutions as in correct layout to not permute them from NHWC to NCHW.
The latter is needed for TF NCHW graphs as well: in the Conv/Deconv infer functions, "set_permutation()"
adds the "permutation" attribute to the weights data node even for NCHW, because Conv weights must be permuted from
the original TF layout into the IE layout regardless. Therefore, for TF models,
the weights path must be marked as having the correct layout even for NCHW graphs to prevent unwarranted permutations.
"""
enabled = True
graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC']
graph_condition = [lambda graph: graph.graph['fw'] == 'tf']
op_conditions = [lambda n: n.soft_get('op') == 'MatMul' and
                 any([len(port.data.get_shape()) in (4, 5) for port in n.in_ports().values()]),
                 ]
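For context, the op_conditions entry above restricts the pass to MatMul nodes that have at least one 4D or 5D input. A minimal standalone sketch of that predicate, using plain Python data instead of MO's Node/port objects (the function name is ours, purely illustrative):

def matmul_needs_layout_marking(op_type, input_shapes):
    # Mirrors the op_conditions lambda: MatMul with any 4D or 5D input.
    return op_type == 'MatMul' and any(len(shape) in (4, 5) for shape in input_shapes)

assert matmul_needs_layout_marking('MatMul', [(1, 8, 16, 32), (32, 64)])
assert not matmul_needs_layout_marking('MatMul', [(16, 32), (32, 64)])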
@ -256,6 +256,9 @@ class Convolution(Op):
    ('output_feature_channel', 'input:{}'.format(weights_index)),
])

# This is needed to permute Conv weights from the original TF [H, W, C_IN, C_OUT] layout into IE [C_OUT, C_IN, H, W];
# for the other nodes in the weights subgraph, permutations must be turned off
# by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW.
PermuteAttrs.set_permutation(node.in_node(weights_index), node, node.soft_get('get_weights_permute', None))
PermuteInputs().set_input_permutation(
    node.in_node(weights_index), node, 'input:{}'.format(weights_index), 'transpose')
@ -99,7 +99,10 @@ class Deconvolution(Op):
    ('input_feature_channel', 'input:1'),
    ('output_feature_channel', 'input:1'),
])

# This is needed to permute Deconv weights from the original TF [H, W, C_OUT, C_IN] layout into IE [C_IN, C_OUT, H, W];
# for the other nodes in the weights subgraph, permutations must be turned off
# by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW.
PermuteAttrs.set_permutation(node.in_node(1), node, node.soft_get('get_weights_permute', None))
PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:1', 'transpose')
PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape')
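The weight permutations described in the two comments above boil down to a single axis transpose. A hedged NumPy sketch of the intended shape transformation, not the MO code path itself (which goes through PermuteAttrs); kernel and channel sizes here are arbitrary example values:

import numpy as np

conv_w = np.zeros((3, 3, 16, 32))             # TF Conv weights: [H, W, C_IN, C_OUT]
ie_conv_w = conv_w.transpose(3, 2, 0, 1)      # IE layout: [C_OUT, C_IN, H, W]
assert ie_conv_w.shape == (32, 16, 3, 3)

deconv_w = np.zeros((3, 3, 32, 16))           # TF Deconv weights: [H, W, C_OUT, C_IN]
ie_deconv_w = deconv_w.transpose(3, 2, 0, 1)  # IE layout: [C_IN, C_OUT, H, W]
assert ie_deconv_w.shape == (16, 32, 3, 3)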
@ -5,17 +5,11 @@
#pragma once

#include <cmath>
#include <map>
#include <utility>
#include <vector>

#include "ngraph/coordinate_transform.hpp"
#include "ngraph/type/bfloat16.hpp"
#include "ngraph/type/float16.hpp"

namespace ngraph {
namespace runtime {
namespace reference {

template <typename T, typename P>
void cumsum(const T* arg,
            const P* axis_tensor,
@ -23,89 +17,29 @@ void cumsum(const T* arg,
            const Shape& tensor_shape,
            const bool exclusive,
            const bool reverse) {
    NGRAPH_SUPPRESS_DEPRECATED_START
    CoordinateTransform temp_transform(tensor_shape);
    for (const Coordinate& output_coord : temp_transform) {
        out[temp_transform.index(output_coord)] = 0;
    }
    const auto rank = tensor_shape.size();
    const auto axis = axis_tensor[0] >= 0 ? axis_tensor[0] : rank + axis_tensor[0];
    const auto axis_dim = tensor_shape[axis];

    P axis = axis_tensor[0];
    P rank = tensor_shape.size();
    const auto size_before_axis = shape_size(Shape(tensor_shape.begin(), tensor_shape.begin() + axis));
    const auto size_after_axis = shape_size(Shape(tensor_shape.begin() + axis + 1, tensor_shape.end()));

    if (axis < -rank || axis > rank) {
        throw ngraph_error("axis must be in the range [-rank, rank]");
    }
    axis = axis < 0 ? rank + axis : axis;
    const auto reverse_shift = reverse ? -1 : 1;
    const auto element_shift = exclusive ? size_after_axis * reverse_shift : 0;

    auto get_key = [&, axis](const Coordinate& coord) -> Coordinate {
        Coordinate result(coord.size(), 0);
        result[axis] = coord[axis];

        for (size_t i = 0; i < coord.size(); i++) {
            result[i] = coord[i] - result[i];
        }
        return result;
    };

    auto update_output_buffer =
        [&](size_t input_index, size_t output_index, T& prev, std::vector<std::pair<size_t, T>>& tensor_vec) -> void {
        tensor_vec[input_index].second = prev + tensor_vec[input_index].second;
        out[tensor_vec[output_index].first] = tensor_vec[input_index].second;

        // update prev to hold the last result value to compute the running sum for
        // the subsequent iteration
        prev = out[tensor_vec[output_index].first];
    };

    auto cum_sum = [&, exclusive, reverse](std::vector<std::pair<size_t, T>>& tensor_vec) {
        if (!reverse) {
            T prev = 0;
            for (size_t i = 0; i < tensor_vec.size(); i++) {
                if (exclusive && i == 0) {
                    out[tensor_vec[i].first] = prev;
                    continue;
                }
                // we will compute the running sum of j-1 elements if exclusive=1, or else
                // of j elements if exclusive=0
                size_t arg_index = exclusive == 1 ? i - 1 : i;
                update_output_buffer(arg_index, i, prev, tensor_vec);
            }
        } else // reverse == true
        {
            T prev = 0;
            for (size_t i = tensor_vec.size(); i-- > 0;) {
                if (exclusive && i == tensor_vec.size() - 1) {
                    out[tensor_vec[i].first] = prev;
                    continue;
                }
                // we will compute the running sum of j-1 elements if exclusive=1, or else
                // of j elements if exclusive=0
                size_t arg_index = exclusive == 1 ? i + 1 : i;
                update_output_buffer(arg_index, i, prev, tensor_vec);
    for (size_t i = 0; i < size_before_axis; ++i) {
        const auto slice_idx = i * axis_dim * size_after_axis + reverse * size_after_axis * (axis_dim - 1);
        for (size_t j = 0; j < size_after_axis; ++j) {
            const auto sequence_start_idx = slice_idx + j;
            out[sequence_start_idx] = exclusive ? T{0} : arg[sequence_start_idx];
            for (size_t k = 1; k < axis_dim; ++k) {
                const auto element_idx = sequence_start_idx + (k * size_after_axis) * reverse_shift;
                const auto in_idx = element_idx - element_shift;
                const auto previous_sum_idx = element_idx - size_after_axis * reverse_shift;
                out[element_idx] = out[previous_sum_idx] + arg[in_idx];
            }
        }
    };

    // Map to collect tensor elements belonging to the same axis
    std::map<Coordinate, std::vector<std::pair<size_t, T>>> map_cooord_to_val;
    CoordinateTransform input_transform(tensor_shape);
    for (const Coordinate& input_coord : input_transform) {
        // points to the current element in the input tensor
        T current = arg[input_transform.index(input_coord)];
        auto key = get_key(input_coord);
        auto index = input_transform.index(input_coord);
        if (map_cooord_to_val.find(key) != map_cooord_to_val.end()) {
            map_cooord_to_val[key].push_back(std::make_pair(index, current));
        } else {
            map_cooord_to_val.insert({key, std::vector<std::pair<size_t, T>>()});
            map_cooord_to_val[key].push_back(std::make_pair(index, current));
        }
    }
    // iterate the map and perform cumulative sum over the given axis
    for (auto& it : map_cooord_to_val) {
        cum_sum(it.second);
    }
    NGRAPH_SUPPRESS_DEPRECATED_END
}
} // namespace reference
} // namespace runtime
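The rewritten reference walks the flat row-major buffer directly: for each of the size_before_axis slices it seeds the first element of every sequence along the axis, then accumulates with a stride of size_after_axis (negated for reverse, with the input read shifted by one stride for exclusive). A Python mirror of the same index arithmetic, useful for sanity-checking it; names follow the C++ above, and the final assert reproduces the removed "cum_sum_2dim" test case:

def cumsum_ref(arg, shape, axis, exclusive=False, reverse=False):
    # arg and out are flat row-major buffers, as in the C++ reference.
    rank = len(shape)
    axis = axis if axis >= 0 else rank + axis
    axis_dim = shape[axis]
    size_before = 1
    for d in shape[:axis]:
        size_before *= d
    size_after = 1
    for d in shape[axis + 1:]:
        size_after *= d

    reverse_shift = -1 if reverse else 1
    element_shift = size_after * reverse_shift if exclusive else 0
    out = [0] * len(arg)
    for i in range(size_before):
        slice_idx = i * axis_dim * size_after + (size_after * (axis_dim - 1) if reverse else 0)
        for j in range(size_after):
            start = slice_idx + j                 # first element of this sequence
            out[start] = 0 if exclusive else arg[start]
            for k in range(1, axis_dim):
                element_idx = start + k * size_after * reverse_shift
                previous_sum_idx = element_idx - size_after * reverse_shift
                out[element_idx] = out[previous_sum_idx] + arg[element_idx - element_shift]
    return out

# Reproduces the removed "cum_sum_2dim" expectation: axis 0 over shape (2, 4).
assert cumsum_ref([0, 1, 2, 3, 4, 5, 6, 7], (2, 4), 0) == [0, 1, 2, 3, 4, 6, 8, 10]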
@ -428,7 +428,6 @@ set(MULTI_TEST_SRC
    backend/cosh.in.cpp
    backend/ctc_greedy_decoder.in.cpp
    backend/ctc_greedy_decoder_seq_len.in.cpp
    backend/cum_sum.in.cpp
    backend/deformable_psroi_pooling.in.cpp
    backend/detection_output.in.cpp
    backend/dft.in.cpp
@ -1,165 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

static std::mt19937_64 random_generator;

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

NGRAPH_TEST(${BACKEND_NAME}, cum_sum_default) {
    Shape shape{1, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
    auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
    copy_data(axis_tensor, vector<int32_t>{1});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, axis_tensor});
    EXPECT_TRUE(test::all_close_f((vector<float>{1, 3, 6, 10}), read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim) {
    Shape shape{2, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
    auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
    auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
    copy_data(axis_tensor, vector<int64_t>{0});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, axis_tensor});
    EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}), read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_default_axis) {
    Shape shape{2, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::CumSum>(A), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}), read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, cum_sum_3d) {
    auto test_cumsum_3d = [](const int32_t axis_val) -> void {
        Shape shape{3, 2, 4};
        auto A = make_shared<op::Parameter>(element::f32, shape);
        auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
        auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});

        auto backend = runtime::Backend::create("${BACKEND_NAME}");

        // Create some tensors for input/output
        auto a = backend->create_tensor(element::f32, shape);
        copy_data(a,
                  vector<float>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
        auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
        copy_data(axis_tensor, vector<int32_t>{axis_val});
        auto result = backend->create_tensor(element::f32, shape);

        auto handle = backend->compile(f);
        handle->call_with_validate({result}, {a, axis_tensor});

        if (axis_val == 0) {
            EXPECT_TRUE(test::all_close_f(
                (vector<float>{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 42, 45}),
                read_vector<float>(result)));
        } else if (axis_val == 1) {
            EXPECT_TRUE(test::all_close_f(
                (vector<float>{0, 1, 2, 3, 4, 6, 8, 10, 8, 9, 10, 11, 20, 22, 24, 26, 16, 17, 18, 19, 36, 38, 40, 42}),
                read_vector<float>(result)));
        } else if (axis_val == 2) {
            EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 3, 6, 4, 9, 15, 22, 8, 17, 27, 38,
                                                         12, 25, 39, 54, 16, 33, 51, 70, 20, 41, 63, 86}),
                                          read_vector<float>(result)));
        }
    };
    test_cumsum_3d(0);
    test_cumsum_3d(1);
    test_cumsum_3d(2);
}

NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_allmodes) {
    auto test_cum_sum_allmodes = [](const int64_t axis_val, int exclusive, int reverse) {
        Shape shape{2, 4};
        auto A = make_shared<op::Parameter>(element::f32, shape);
        auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
        auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis, exclusive, reverse), ParameterVector{A, axis});

        auto backend = runtime::Backend::create("${BACKEND_NAME}");

        // Create some tensors for input/output
        auto a = backend->create_tensor(element::f32, shape);
        copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
        auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
        copy_data(axis_tensor, vector<int64_t>{axis_val});
        auto result = backend->create_tensor(element::f32, shape);

        auto handle = backend->compile(f);
        handle->call_with_validate({result}, {a, axis_tensor});
        if (axis_val == 1 && exclusive == 1 && reverse == 0) {
            EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 1, 3, 0, 4, 9, 15}), read_vector<float>(result)));
        } else if (axis_val == 1 && exclusive == 0 && reverse == 1) {
            EXPECT_TRUE(test::all_close_f((vector<float>{6, 6, 5, 3, 22, 18, 13, 7}), read_vector<float>(result)));
        } else if (axis_val == 1 && exclusive == 1 && reverse == 1) {
            EXPECT_TRUE(test::all_close_f((vector<float>{6, 5, 3, 0, 18, 13, 7, 0}), read_vector<float>(result)));
        } else if (axis_val == 0 && exclusive == 0 && reverse == 0) {
            EXPECT_TRUE(test::all_close_f((vector<float>{0, 1, 2, 3, 4, 6, 8, 10}), read_vector<float>(result)));
        } else if (axis_val == 0 && exclusive == 1 && reverse == 1) {
            EXPECT_TRUE(test::all_close_f((vector<float>{4, 5, 6, 7, 0, 0, 0, 0}), read_vector<float>(result)));
        } else if (axis_val == 0 && exclusive == 0 && reverse == 1) {
            EXPECT_TRUE(test::all_close_f((vector<float>{4, 6, 8, 10, 4, 5, 6, 7}), read_vector<float>(result)));
        }
    };

    test_cum_sum_allmodes(1, 1, 0);
    test_cum_sum_allmodes(-1, 0, 1);
    test_cum_sum_allmodes(-1, 1, 1);
    test_cum_sum_allmodes(0, 0, 0);
    test_cum_sum_allmodes(0, 1, 1);
    test_cum_sum_allmodes(0, 0, 1);
}
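As a worked check on one of the cases above: test_cum_sum_allmodes(1, 1, 0) is an exclusive forward cumsum along axis 1 of [[0, 1, 2, 3], [4, 5, 6, 7]], so each output element is the running sum of the elements before it in its row. A few lines of plain Python derive the expected vector:

rows = [[0, 1, 2, 3], [4, 5, 6, 7]]
expected = []
for row in rows:
    total = 0
    for v in row:            # exclusive: emit the running sum *before* adding v
        expected.append(total)
        total += v
assert expected == [0, 0, 1, 3, 0, 4, 9, 15]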
9
tests/layer_tests/CMakeLists.txt
Normal file
@ -0,0 +1,9 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required(VERSION 3.13)

project(layer_tests)

install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
@ -51,5 +51,30 @@
<model path="public/vgg16/FP16/vgg16.xml" precision="FP16" test="infer_request_inference" device="GPU" vmsize="2644886" vmpeak="3222918" vmrss="1024223" vmhwm="1567935" /> # values from {"commit_id": "2947789b3b18a724096abbd9a5c535ae3128ce05", "commit_date": "2021-07-12 23:30"} and *= 1.3
<model path="public/vgg16/FP16/vgg16.xml" precision="FP16" test="inference_with_streams" device="CPU" vmsize="3607037" vmpeak="3607806" vmrss="2415992" vmhwm="2415992" /> # values from {"commit_id": "761e571042fa2b291d5954e523fffc1e2dfcafae", "commit_date": "2021-05-20 10:36"} and *= 1.3
<model path="public/vgg16/FP16/vgg16.xml" precision="FP16" test="inference_with_streams" device="GPU" vmsize="2761231" vmpeak="3318697" vmrss="1047467" vmhwm="1565647" /> # values from {"commit_id": "2947789b3b18a724096abbd9a5c535ae3128ce05", "commit_date": "2021-07-12 23:30"} and *= 1.3
<!--Models with FP16-INT8 precision-->
<model path="intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml" precision="FP16-INT8" test="create_exenetwork" device="CPU" vmsize="845462" vmpeak="845462" vmrss="54277" vmhwm="54277" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml" precision="FP16-INT8" test="create_exenetwork" device="GPU" vmsize="1917323" vmpeak="1917739" vmrss="581136" vmhwm="949936" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml" precision="FP16-INT8" test="inference_with_streams" device="CPU" vmsize="1629425" vmpeak="1633361" vmrss="77937" vmhwm="77937" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml" precision="FP16-INT8" test="inference_with_streams" device="GPU" vmsize="2147454" vmpeak="2232651" vmrss="671205" vmhwm="969524" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml" precision="FP16-INT8" test="infer_request_inference" device="CPU" vmsize="939926" vmpeak="939926" vmrss="61235" vmhwm="61235" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml" precision="FP16-INT8" test="infer_request_inference" device="GPU" vmsize="2009384" vmpeak="2094580" vmrss="583086" vmhwm="944008" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/person-detection-action-recognition-0006/FP16-INT8/person-detection-action-recognition-0006.xml" precision="FP16-INT8" test="create_exenetwork" device="CPU" vmsize="985509" vmpeak="985509" vmrss="102684" vmhwm="102684" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/person-detection-action-recognition-0006/FP16-INT8/person-detection-action-recognition-0006.xml" precision="FP16-INT8" test="create_exenetwork" device="GPU" vmsize="3018548" vmpeak="3038781" vmrss="1967492" vmhwm="2351637" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/person-detection-action-recognition-0006/FP16-INT8/person-detection-action-recognition-0006.xml" precision="FP16-INT8" test="inference_with_streams" device="CPU" vmsize="1893320" vmpeak="1897464" vmrss="153072" vmhwm="153072" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/person-detection-action-recognition-0006/FP16-INT8/person-detection-action-recognition-0006.xml" precision="FP16-INT8" test="inference_with_streams" device="GPU" vmsize="3234410" vmpeak="3319607" vmrss="1993123" vmhwm="2356712" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/person-detection-action-recognition-0006/FP16-INT8/person-detection-action-recognition-0006.xml" precision="FP16-INT8" test="infer_request_inference" device="CPU" vmsize="989825" vmpeak="989825" vmrss="112465" vmhwm="112465" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/person-detection-action-recognition-0006/FP16-INT8/person-detection-action-recognition-0006.xml" precision="FP16-INT8" test="infer_request_inference" device="GPU" vmsize="3102907" vmpeak="3188104" vmrss="1959562" vmhwm="2354128" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/single-image-super-resolution-1032/FP16-INT8/single-image-super-resolution-1032.xml" precision="FP16-INT8" test="create_exenetwork" device="CPU" vmsize="998602" vmpeak="998602" vmrss="67246" vmhwm="67246" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/single-image-super-resolution-1032/FP16-INT8/single-image-super-resolution-1032.xml" precision="FP16-INT8" test="create_exenetwork" device="GPU" vmsize="2190047" vmpeak="2190047" vmrss="624036" vmhwm="624036" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/single-image-super-resolution-1032/FP16-INT8/single-image-super-resolution-1032.xml" precision="FP16-INT8" test="inference_with_streams" device="CPU" vmsize="2004033" vmpeak="2069199" vmrss="334141" vmhwm="334250" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/single-image-super-resolution-1032/FP16-INT8/single-image-super-resolution-1032.xml" precision="FP16-INT8" test="inference_with_streams" device="GPU" vmsize="2799196" vmpeak="2799196" vmrss="1061351" vmhwm="1061351" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/single-image-super-resolution-1032/FP16-INT8/single-image-super-resolution-1032.xml" precision="FP16-INT8" test="infer_request_inference" device="CPU" vmsize="1063769" vmpeak="1063769" vmrss="297944" vmhwm="297944" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/single-image-super-resolution-1032/FP16-INT8/single-image-super-resolution-1032.xml" precision="FP16-INT8" test="infer_request_inference" device="GPU" vmsize="2364830" vmpeak="2450026" vmrss="711063" vmhwm="711063" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/vehicle-attributes-recognition-barrier-0039/FP16-INT8/vehicle-attributes-recognition-barrier-0039.xml" precision="FP16-INT8" test="create_exenetwork" device="CPU" vmsize="792677" vmpeak="866647" vmrss="33794" vmhwm="33794" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/vehicle-attributes-recognition-barrier-0039/FP16-INT8/vehicle-attributes-recognition-barrier-0039.xml" precision="FP16-INT8" test="create_exenetwork" device="GPU" vmsize="1850950" vmpeak="1871251" vmrss="265959" vmhwm="410160" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/vehicle-attributes-recognition-barrier-0039/FP16-INT8/vehicle-attributes-recognition-barrier-0039.xml" precision="FP16-INT8" test="inference_with_streams" device="CPU" vmsize="1527453" vmpeak="1527453" vmrss="36462" vmhwm="36462" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/vehicle-attributes-recognition-barrier-0039/FP16-INT8/vehicle-attributes-recognition-barrier-0039.xml" precision="FP16-INT8" test="inference_with_streams" device="GPU" vmsize="2028130" vmpeak="2113326" vmrss="249808" vmhwm="408948" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/vehicle-attributes-recognition-barrier-0039/FP16-INT8/vehicle-attributes-recognition-barrier-0039.xml" precision="FP16-INT8" test="infer_request_inference" device="CPU" vmsize="792677" vmpeak="866647" vmrss="33113" vmhwm="33113" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
<model path="intel/vehicle-attributes-recognition-barrier-0039/FP16-INT8/vehicle-attributes-recognition-barrier-0039.xml" precision="FP16-INT8" test="infer_request_inference" device="GPU" vmsize="1932590" vmpeak="2017787" vmrss="246838" vmhwm="410576" /> # values from {"commit_id": "30a30efbc44f3e74a0283acd0f9ccf2e7caf94b6", "commit_date": "2021-08-19 21:17"} and *= 1.3
</models>
</attributes>
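All of the reference values above follow the pattern stated in the trailing comments: a memory measurement taken at a pinned commit, inflated by 30% ("*= 1.3") to give the pass/fail ceiling. A trivial sketch of that bookkeeping; the sample input is back-computed from the first vgg16 entry's vmrss and is illustrative, not authoritative:

def memcheck_threshold(measured_kb, margin=1.3):
    # Reference values in the XML are measured at a pinned commit, then *= 1.3.
    return int(measured_kb * margin)

# A hypothetical measured vmrss of 787864 KB yields the stored 1024223.
assert memcheck_threshold(787864) == 1024223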
@ -14,5 +14,10 @@
<model name="mtcnn-r" precision="FP16" source="omz" />
<model name="mobilenet-ssd" precision="FP16" source="omz" />
<model name="ssd300" precision="FP16" source="omz" />
<!--Models with FP16-INT8 precision-->
<model name="vehicle-attributes-recognition-barrier-0039" precision="FP16-INT8" source="omz" />
<model name="person-detection-action-recognition-0006" precision="FP16-INT8" source="omz" />
<model name="face-detection-adas-0001" precision="FP16-INT8" source="omz" />
<model name="single-image-super-resolution-1032" precision="FP16-INT8" source="omz" />
</models>
</attributes>