Merge remote-tracking branch 'upstream/master'
This commit is contained in:
commit
eef72406cc
@ -36,7 +36,7 @@ clamp( x_{i} )=\min\big( \max\left( x_{i}, min\_value \right), max\_value \big)
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -33,7 +33,7 @@ where α corresponds to *alpha* attribute.
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -18,7 +18,7 @@ exp(x) = e^{x}
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -27,7 +27,7 @@ Additionally, *Gelu* function may be approximated as follows:
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -45,7 +45,7 @@ For `tanh` approximation mode, *Gelu* function is represented as:
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -19,7 +19,7 @@ The HSwish operation is introduced in the following [article](https://arxiv.org/
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Multidimensional input tensor of type *T*. **Required**.
|
||||
* **1**: Multidimensional input tensor of type *T*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -24,7 +24,7 @@ LogSoftmax(x, axis) = t - Log(ReduceSum(Exp(t), axis))
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor *x* of type *T* with enough number of dimension to be compatible with *axis* attribute. Required.
|
||||
* **1**: Input tensor *x* of type *T* with enough number of dimension to be compatible with *axis* attribute. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -20,7 +20,7 @@ Mish(x) = x\cdot\tanh\big(SoftPlus(x)\big) = x\cdot\tanh\big(\ln(1+e^{x})\big)
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -32,8 +32,8 @@ PReLU(x) = \max(0, x) + \alpha\cdot\min(0, x)
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data`. A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **2**: `slope`. A tensor of type *T* and rank greater or equal to 1. Tensor with negative slope values. **Required**.
|
||||
* **1**: `data`. A tensor of type *T* and arbitrary shape. **Required.**
|
||||
* **2**: `slope`. A tensor of type *T* and rank greater or equal to 1. Tensor with negative slope values. **Required.**
|
||||
* **Note**: Channels dimension corresponds to the second dimension of `data` input tensor. If `slope` input rank is 1 and its dimension is equal to the second dimension of `data` input, then per channel broadcast is applied. Otherwise `slope` input is broadcasted with numpy rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
|
||||
|
||||
**Outputs**
|
||||
|
@ -20,11 +20,11 @@ For each element from the input tensor calculates corresponding
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Multidimensional input tensor *x* of any supported numeric type. Required.
|
||||
* **1**: Multidimensional input tensor *x* of any supported numeric type. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
* **1**: Result of ReLU function applied to the input tensor *x*. Tensor with shape and type matching the input tensor. Required.
|
||||
* **1**: Result of ReLU function applied to the input tensor *x*. Tensor with shape and type matching the input tensor.
|
||||
|
||||
**Example**
|
||||
|
||||
|
@ -20,7 +20,7 @@ sigmoid( x ) = \frac{1}{1+e^{-x}}
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor *x* of any floating point type. Required.
|
||||
* **1**: Input tensor *x* of any floating point type. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -27,7 +27,7 @@ where \f$C\f$ is a size of tensor along *axis* dimension.
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor with enough number of dimension to be compatible with *axis* attribute. Required.
|
||||
* **1**: Input tensor with enough number of dimension to be compatible with *axis* attribute. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -35,7 +35,7 @@ For example, if *T* is `fp32`, `threshold` should be `20` or if *T* is `fp16`, `
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -22,9 +22,9 @@ where β corresponds to `beta` scalar input.
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data`. A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: `data`. A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
* **2**: `beta`. A non-negative scalar value of type *T*. Multiplication parameter for the sigmoid. Default value 1.0 is used. **Optional**.
|
||||
* **2**: `beta`. A non-negative scalar value of type *T*. Multiplication parameter for the sigmoid. Default value 1.0 is used. **Optional.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -29,8 +29,8 @@ o_{i} = a_{i} % b_{i}
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **2**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -29,8 +29,8 @@ o_{i} = max(a_{i}, b_{i})
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **2**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -27,8 +27,8 @@ o_{i} = min(a_{i}, b_{i})
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **2**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -30,8 +30,8 @@ o_{i} = a_{i} % b_{i}
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **2**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -27,8 +27,8 @@ o_{i} = {a_{i} ^ b_{i}}
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **2**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -27,8 +27,8 @@ o_{i} = (a_{i} - b_{i})^2
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **2**: A tensor of type *T* and arbitrary shape. Required.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -18,7 +18,7 @@ For example, if the first input tensor is `[[3, 50], [10, -1]]` and the second i
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *with_right_bound*
|
||||
|
||||
@ -32,8 +32,8 @@ For example, if the first input tensor is `[[3, 50], [10, -1]]` and the second i
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: N-D tensor of *T* type with elements for the bucketization. Required.
|
||||
* **2**: 1-D tensor of *T_BOUNDARIES* type with sorted unique boundaries for buckets. Required.
|
||||
* **1**: N-D tensor of *T* type with elements for the bucketization. **Required.**
|
||||
* **2**: 1-D tensor of *T_BOUNDARIES* type with sorted unique boundaries for buckets. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -21,11 +21,11 @@
|
||||
* **Range of values**: `i64` or `i32`
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -53,7 +53,7 @@ Computation algorithm for mode *xnor-popcount*:
|
||||
* *xnor-popcount*
|
||||
* **Type**: `string`
|
||||
* **Required**: *yes*
|
||||
* **Note**: value `0` in inputs is interpreted as `-1`, value `1` as `1`
|
||||
* **Note**: value `0` in inputs is interpreted as `-1`, value `1` as `1`
|
||||
|
||||
* *pad_value*
|
||||
|
||||
@ -76,8 +76,8 @@ Computation algorithm for mode *xnor-popcount*:
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor of type *T1* and rank 4. Layout is `[N, C_IN, Y, X]` (number of batches, number of channels, spatial axes Y, X). Required.
|
||||
* **2**: Kernel tensor of type *T2* and rank 4. Layout is `[C_OUT, C_IN, Y, X]` (number of output channels, number of input channels, spatial axes Y, X). Required.
|
||||
* **1**: Input tensor of type *T1* and rank 4. Layout is `[N, C_IN, Y, X]` (number of batches, number of channels, spatial axes Y, X). **Required.**
|
||||
* **2**: Kernel tensor of type *T2* and rank 4. Layout is `[C_OUT, C_IN, Y, X]` (number of output channels, number of input channels, spatial axes Y, X). **Required.**
|
||||
* **Note**: Interpretation of tensor values is defined by *mode* attribute.
|
||||
|
||||
**Outputs**:
|
||||
|
@ -90,11 +90,11 @@ else:
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor of type *T1* and rank 3, 4 or 5. Layout is `[N, C_INPUT, Z, Y, X]` (number of batches, number of input channels, spatial axes Z, Y, X). *Required*.
|
||||
* **1**: Input tensor of type *T1* and rank 3, 4 or 5. Layout is `[N, C_INPUT, Z, Y, X]` (number of batches, number of input channels, spatial axes Z, Y, X). **Required.**
|
||||
|
||||
* **2**: Convolution kernel tensor of type *T1* and rank 3, 4 or 5. Layout is `[C_INPUT, C_OUTPUT, Z, Y, X]` (number of input channels, number of output channels, spatial axes Z, Y, X). Spatial size of the kernel is derived from the shape of this input and aren't specified by any attribute. *Required*.
|
||||
* **2**: Convolution kernel tensor of type *T1* and rank 3, 4 or 5. Layout is `[C_INPUT, C_OUTPUT, Z, Y, X]` (number of input channels, number of output channels, spatial axes Z, Y, X). Spatial size of the kernel is derived from the shape of this input and aren't specified by any attribute. **Required.**
|
||||
|
||||
* **3**: `output_shape` is 1D tensor of type *T2* that specifies spatial shape of the output. If specified, *padding amount* is deduced from relation of input and output spatial shapes according to formulas in the description. If not specified, *output shape* is calculated based on the `pads_begin` and `pads_end` or completely according to `auto_pad`. *Optional*.
|
||||
* **3**: `output_shape` is 1D tensor of type *T2* that specifies spatial shape of the output. If specified, *padding amount* is deduced from relation of input and output spatial shapes according to formulas in the description. If not specified, *output shape* is calculated based on the `pads_begin` and `pads_end` or completely according to `auto_pad`. **Optional.**
|
||||
* **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute:
|
||||
* 1D convolution (input tensors rank 3) means that there is only one spatial axis X,
|
||||
* 2D convolution (input tensors rank 4) means that there are two spatial axes Y, X,
|
||||
|
@ -77,8 +77,8 @@ The receptive field in each layer is calculated using the formulas:
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor of type *T* and rank 3, 4 or 5. Layout is `[N, C_IN, Z, Y, X]` (number of batches, number of channels, spatial axes Z, Y, X). Required.
|
||||
* **2**: Kernel tensor of type *T* and rank 3, 4 or 5. Layout is `[C_OUT, C_IN, Z, Y, X]` (number of output channels, number of input channels, spatial axes Z, Y, X). Required.
|
||||
* **1**: Input tensor of type *T* and rank 3, 4 or 5. Layout is `[N, C_IN, Z, Y, X]` (number of batches, number of channels, spatial axes Z, Y, X). **Required.**
|
||||
* **2**: Kernel tensor of type *T* and rank 3, 4 or 5. Layout is `[C_OUT, C_IN, Z, Y, X]` (number of output channels, number of input channels, spatial axes Z, Y, X). **Required.**
|
||||
* **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute:
|
||||
* 1D convolution (input tensors rank 3) means that there is only one spatial axis X
|
||||
* 2D convolution (input tensors rank 4) means that there are two spatial axes Y, X
|
||||
|
@ -91,11 +91,11 @@ Where
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor of type *T* and rank 4. Layout is `NCYX` (number of batches, number of channels, spatial axes Y and X). Required.
|
||||
* **1**: Input tensor of type *T* and rank 4. Layout is `NCYX` (number of batches, number of channels, spatial axes Y and X). **Required.**
|
||||
|
||||
* **2**: Offsets tensor of type *T* and rank 4. Layout is `NCYX` (number of batches, *deformable_group* \* kernel_Y \* kernel_X \* 2, spatial axes Y and X). Required.
|
||||
* **2**: Offsets tensor of type *T* and rank 4. Layout is `NCYX` (number of batches, *deformable_group* \* kernel_Y \* kernel_X \* 2, spatial axes Y and X). **Required.**
|
||||
|
||||
* **3**: Kernel tensor of type *T* and rank 4. Layout is `OIYX` (number of output channels, number of input channels, spatial axes Y and X). Required.
|
||||
* **3**: Kernel tensor of type *T* and rank 4. Layout is `OIYX` (number of output channels, number of input channels, spatial axes Y and X). **Required.**
|
||||
|
||||
|
||||
**Outputs**:
|
||||
|
@ -62,11 +62,11 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor of type `T1` and rank 3, 4 or 5. Layout is `[N, GROUPS * C_IN, Z, Y, X]` (number of batches, number of channels, spatial axes Z, Y, X). Required.
|
||||
* **1**: Input tensor of type `T1` and rank 3, 4 or 5. Layout is `[N, GROUPS * C_IN, Z, Y, X]` (number of batches, number of channels, spatial axes Z, Y, X). **Required.**
|
||||
|
||||
* **2**: Kernel tensor of type `T1` and rank 4, 5 or 6. Layout is `[GROUPS, C_IN, C_OUT, X, Y, Z]` (number of groups, number of input channels, number of output channels, spatial axes X, Y, Z). Required.
|
||||
* **2**: Kernel tensor of type `T1` and rank 4, 5 or 6. Layout is `[GROUPS, C_IN, C_OUT, X, Y, Z]` (number of groups, number of input channels, number of output channels, spatial axes X, Y, Z). **Required.**
|
||||
|
||||
* **3**: Output shape tensor of type `T2` and rank 1. It specifies spatial shape of the output. Optional.
|
||||
* **3**: Output shape tensor of type `T2` and rank 1. It specifies spatial shape of the output. **Optional.**
|
||||
* **Note** Number of groups is derived from the shape of the kernel and not specified by any attribute.
|
||||
* **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute:
|
||||
* 1D convolution (input tensors rank 3) means that there is only one spatial axis X
|
||||
|
@ -55,7 +55,7 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Input tensor of type *T* and rank 3, 4 or 5. Layout is `[N, GROUPS * C_IN, Z, Y, X]` (number of batches, number of channels, spatial axes Z, Y, X). Required.
|
||||
* **1**: Input tensor of type *T* and rank 3, 4 or 5. Layout is `[N, GROUPS * C_IN, Z, Y, X]` (number of batches, number of channels, spatial axes Z, Y, X). **Required.**
|
||||
* **2**: Convolution kernel tensor of type *T* and rank 4, 5 or 6. Layout is `[GROUPS, C_OUT, C_IN, Z, Y, X]` (number of groups, number of output channels, number of input channels, spatial axes Z, Y, X),
|
||||
* **Note** Number of groups is derived from the shape of the kernel and not specified by any attribute.
|
||||
* **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute:
|
||||
|
@ -81,12 +81,12 @@ This operation is compatible with [MXNet DeformablePSROIPooling](https://mxnet.a
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 4D input tensor of type *T* and shape `[N_in, C_in, H_in, W_in]` with position sensitive score maps. Required.
|
||||
* **1**: 4D input tensor of type *T* and shape `[N_in, C_in, H_in, W_in]` with position sensitive score maps. **Required.**
|
||||
|
||||
* **2**: 2D input tensor of type *T* and shape `[NUM_ROIS, 5]`. It contains a list of five element tuples describing a single ROI (region of interest): `[batch_id, x_1, y_1, x_2, y_2]`. Required.
|
||||
* **2**: 2D input tensor of type *T* and shape `[NUM_ROIS, 5]`. It contains a list of five element tuples describing a single ROI (region of interest): `[batch_id, x_1, y_1, x_2, y_2]`. **Required.**
|
||||
Batch indices must be in the range of `[0, N_in-1]`.
|
||||
|
||||
* **3**: 4D input tensor of type *T* and shape `[NUM_ROIS, 2*NUM_CLASSES, group_size, group_size]` with transformation values. It contains normalized `[0, 1]` offsets for each ROI bin left top corner coordinates. Channel dimension is multiplied by `2` because of encoding two `(x, y)` coordinates. Optional.
|
||||
* **3**: 4D input tensor of type *T* and shape `[NUM_ROIS, 2*NUM_CLASSES, group_size, group_size]` with transformation values. It contains normalized `[0, 1]` offsets for each ROI bin left top corner coordinates. Channel dimension is multiplied by `2` because of encoding two `(x, y)` coordinates. **Optional.**
|
||||
|
||||
**Outputs**:
|
||||
* **1**: 4D output tensor of type *T* shape `[NUM_ROIS, output_dim, group_size, group_size]` with ROIs score maps.
|
||||
|
@ -133,12 +133,11 @@ At each feature map cell, *DetectionOutput* predicts the offsets relative to the
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: 2D input tensor with box logits with shape `[N, num_prior_boxes * num_loc_classes * 4]` and type *T*. `num_loc_classes` is equal to `num_classes` when `share_location` is 0 or it's equal to 1 otherwise. Required.
|
||||
* **2**: 2D input tensor with class predictions with shape `[N, num_prior_boxes * num_classes]` and type *T*. Required.
|
||||
* **3**: 3D input tensor with proposals with shape `[priors_batch_size, 1, num_prior_boxes * prior_box_size]` or `[priors_batch_size, 2, num_prior_boxes * prior_box_size]`. `priors_batch_size` is either 1 or `N`. Size of the second dimension depends on `variance_encoded_in_target`. If `variance_encoded_in_target` is equal to 0, the second dimension equals to 2 and variance values are provided for each boxes coordinates. If `variance_encoded_in_target` is equal to 1, the second dimension equals to 1 and this tensor contains proposals boxes only. `prior_box_size` is equal to 4 when `normalized` is set to 1 or it's equal to 5 otherwise. Required.
|
||||
Required.
|
||||
* **4**: 2D input tensor with additional class predictions information described in the [article](https://arxiv.org/pdf/1711.06897.pdf). Its shape must be equal to `[N, num_prior_boxes * 2]`. Optional.
|
||||
* **5**: 2D input tensor with additional box predictions information described in the [article](https://arxiv.org/pdf/1711.06897.pdf). Its shape must be equal to first input tensor shape. Optional.
|
||||
* **1**: 2D input tensor with box logits with shape `[N, num_prior_boxes * num_loc_classes * 4]` and type *T*. `num_loc_classes` is equal to `num_classes` when `share_location` is 0 or it's equal to 1 otherwise. **Required.**
|
||||
* **2**: 2D input tensor with class predictions with shape `[N, num_prior_boxes * num_classes]` and type *T*. **Required.**
|
||||
* **3**: 3D input tensor with proposals with shape `[priors_batch_size, 1, num_prior_boxes * prior_box_size]` or `[priors_batch_size, 2, num_prior_boxes * prior_box_size]`. `priors_batch_size` is either 1 or `N`. Size of the second dimension depends on `variance_encoded_in_target`. If `variance_encoded_in_target` is equal to 0, the second dimension equals to 2 and variance values are provided for each boxes coordinates. If `variance_encoded_in_target` is equal to 1, the second dimension equals to 1 and this tensor contains proposals boxes only. `prior_box_size` is equal to 4 when `normalized` is set to 1 or it's equal to 5 otherwise. **Required.**
|
||||
* **4**: 2D input tensor with additional class predictions information described in the [article](https://arxiv.org/pdf/1711.06897.pdf). Its shape must be equal to `[N, num_prior_boxes * 2]`. **Optional.**
|
||||
* **5**: 2D input tensor with additional box predictions information described in the [article](https://arxiv.org/pdf/1711.06897.pdf). Its shape must be equal to first input tensor shape. **Optional.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -69,11 +69,11 @@ For more details please see the following source:
|
||||
|
||||
* **1**: 2D input tensor of type *T* with shape `[number_of_ROIs, 4]` providing the ROIs as 4-tuples:
|
||||
[x<sub>1</sub>, y<sub>1</sub>, x<sub>2</sub>, y<sub>2</sub>]. Coordinates *x* and *y* are refer to the network's input
|
||||
*image_size*. **Required**.
|
||||
*image_size*. **Required.**
|
||||
|
||||
* **2**, ..., **L**: Pyramid of 4D input tensors with feature maps. Shape must be
|
||||
`[1, number_of_channels, layer_size[l], layer_size[l]]`. The number of channels must be the same for all layers of the
|
||||
pyramid. The layer width and height must equal to the `layer_size[l] = image_size / pyramid_scales[l]`. **Required**.
|
||||
pyramid. The layer width and height must equal to the `layer_size[l] = image_size / pyramid_scales[l]`. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -61,9 +61,9 @@ ROIs coordinates are specified in absolute values for the average mode and in no
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 4D input tensor with shape `[N, C, H, W]` and type *T* with feature maps. Required.
|
||||
* **1**: 4D input tensor with shape `[N, C, H, W]` and type *T* with feature maps. **Required.**
|
||||
|
||||
* **2**: 2D input tensor with shape `[num_boxes, 5]`. It contains a list of five element tuples that describe a region of interest: `[batch_id, x_1, y_1, x_2, y_2]`. Required.
|
||||
* **2**: 2D input tensor with shape `[num_boxes, 5]`. It contains a list of five element tuples that describe a region of interest: `[batch_id, x_1, y_1, x_2, y_2]`. **Required.**
|
||||
Batch indices must be in the range of `[0, N-1]`.
|
||||
|
||||
**Outputs**:
|
||||
|
@ -59,9 +59,9 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `output_size` - 1D tensor with two integer elements `[height, width]`. Specifies the spatial size of generated grid with boxes. Required.
|
||||
* **1**: `output_size` - 1D tensor with two integer elements `[height, width]`. Specifies the spatial size of generated grid with boxes. **Required.**
|
||||
|
||||
* **2**: `image_size` - 1D tensor with two integer elements `[image_height, image_width]` that specifies shape of the image for which boxes are generated. Optional.
|
||||
* **2**: `image_size` - 1D tensor with two integer elements `[image_height, image_width]` that specifies shape of the image for which boxes are generated. **Optional.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -138,9 +138,9 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `output_size` - 1D tensor of type *T_INT* with two elements `[height, width]`. Specifies the spatial size of generated grid with boxes. **Required**.
|
||||
* **1**: `output_size` - 1D tensor of type *T_INT* with two elements `[height, width]`. Specifies the spatial size of generated grid with boxes. **Required.**
|
||||
|
||||
* **2**: `image_size` - 1D tensor of type *T_INT* with two elements `[image_height, image_width]` that specifies shape of the image for which boxes are generated. **Required**.
|
||||
* **2**: `image_size` - 1D tensor of type *T_INT* with two elements `[image_height, image_width]` that specifies shape of the image for which boxes are generated. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -129,11 +129,11 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 4D tensor of type *T* and shape `[batch_size, 2*K, H, W]` with class prediction scores. Required.
|
||||
* **1**: 4D tensor of type *T* and shape `[batch_size, 2*K, H, W]` with class prediction scores. **Required.**
|
||||
|
||||
* **2**: 4D tensor of type *T* and shape `[batch_size, 4*K, H, W]` with deltas for each bounding box. Required.
|
||||
* **2**: 4D tensor of type *T* and shape `[batch_size, 4*K, H, W]` with deltas for each bounding box. **Required.**
|
||||
|
||||
* **3**: 1D tensor of type *T* with 3 or 4 elements: `[image_height, image_width, scale_height_and_width]` or `[image_height, image_width, scale_height, scale_width]`. Required.
|
||||
* **3**: 1D tensor of type *T* with 3 or 4 elements: `[image_height, image_width, scale_height_and_width]` or `[image_height, image_width, scale_height, scale_width]`. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -138,11 +138,11 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 4D tensor of type *T* and shape `[batch_size, 2*K, H, W]` with class prediction scores. Required.
|
||||
* **1**: 4D tensor of type *T* and shape `[batch_size, 2*K, H, W]` with class prediction scores. **Required.**
|
||||
|
||||
* **2**: 4D tensor of type *T* and shape `[batch_size, 4*K, H, W]` with deltas for each bounding box. Required.
|
||||
* **2**: 4D tensor of type *T* and shape `[batch_size, 4*K, H, W]` with deltas for each bounding box. **Required.**
|
||||
|
||||
* **3**: 1D tensor of type *T* with 3 or 4 elements: `[image_height, image_width, scale_height_and_width]` or `[image_height, image_width, scale_height, scale_width]`. Required.
|
||||
* **3**: 1D tensor of type *T* with 3 or 4 elements: `[image_height, image_width, scale_height_and_width]` or `[image_height, image_width, scale_height, scale_width]`. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -55,13 +55,13 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 4D input tensor of shape `[N, C, H, W]` with feature maps of type *T*. Required.
|
||||
* **1**: 4D input tensor of shape `[N, C, H, W]` with feature maps of type *T*. **Required.**
|
||||
|
||||
* **2**: 2D input tensor of shape `[NUM_ROIS, 4]` describing box consisting of 4 element tuples: `[x_1, y_1, x_2, y_2]` in relative coordinates of type *T*.
|
||||
The box height and width are calculated the following way: `roi_width = max(spatial_scale * (x_2 - x_1), 1.0)`,
|
||||
`roi_height = max(spatial_scale * (y_2 - y_1), 1.0)`, so the malformed boxes are expressed as a box of size `1 x 1`. Required.
|
||||
`roi_height = max(spatial_scale * (y_2 - y_1), 1.0)`, so the malformed boxes are expressed as a box of size `1 x 1`. **Required.**
|
||||
|
||||
* **3**: 1D input tensor of shape `[NUM_ROIS]` with batch indices of type *IND_T*. Required.
|
||||
* **3**: 1D input tensor of shape `[NUM_ROIS]` with batch indices of type *IND_T*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -52,9 +52,9 @@ The box height and width have different representation based on **method** attri
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 4D input tensor of shape `[N, C, H, W]` with feature maps of type *T*. Required.
|
||||
* **1**: 4D input tensor of shape `[N, C, H, W]` with feature maps of type *T*. **Required.**
|
||||
|
||||
* **2**: 2D input tensor of shape `[NUM_ROIS, 5]` describing region of interest box consisting of 5 element tuples of type *T*: `[batch_id, x_1, y_1, x_2, y_2]`. Required.
|
||||
* **2**: 2D input tensor of shape `[NUM_ROIS, 5]` describing region of interest box consisting of 5 element tuples of type *T*: `[batch_id, x_1, y_1, x_2, y_2]`. **Required.**
|
||||
Batch indices must be in the range of `[0, N-1]`.
|
||||
|
||||
|
||||
|
@ -60,9 +60,9 @@ This is a scalar that specifies padding for each spatial dimension.
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` - Input tensor with data for interpolation. Type of elements is any supported floating point type. Required.
|
||||
* **1**: `data` - Input tensor with data for interpolation. Type of elements is any supported floating point type. **Required.**
|
||||
|
||||
* **2**: `target_spatial_shape` - 1D tensor describing output shape for spatial axes. Number of elements matches the number of indices in *axes* attribute, the order matches as well. Required.
|
||||
* **2**: `target_spatial_shape` - 1D tensor describing output shape for spatial axes. Number of elements matches the number of indices in *axes* attribute, the order matches as well. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -23,7 +23,7 @@ declared in `variable_id` and returns an error otherwise.
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `new_value` - input tensor of any supported type. **Required**.
|
||||
* **1**: `new_value` - input tensor of any supported type. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -133,11 +133,11 @@ Loop operation description in the IR also has several special sections: `body`,
|
||||
|
||||
**Loop Inputs**
|
||||
|
||||
* **Trip count**: A scalar or 1D tensor with 1 element of `int64` or `int32` type specifying maximum number of iterations. *Required*.
|
||||
* **Trip count**: A scalar or 1D tensor with 1 element of `int64` or `int32` type specifying maximum number of iterations. **Required.**
|
||||
|
||||
* **ExecutionCondition**: A scalar or 1D tensor with 1 element of `boolean` type specifying whether to execute the first iteration or not. `True` value means to execute the 1st iteration. *Required*.
|
||||
* **ExecutionCondition**: A scalar or 1D tensor with 1 element of `boolean` type specifying whether to execute the first iteration or not. `True` value means to execute the 1st iteration. **Required.**
|
||||
|
||||
* **Multiple other inputs**: tensors of different types and shapes. *Optional*.
|
||||
* **Multiple other inputs**: tensors of different types and shapes. **Optional.**
|
||||
|
||||
**Loop Outputs**
|
||||
|
||||
@ -146,7 +146,7 @@ Loop operation description in the IR also has several special sections: `body`,
|
||||
|
||||
**Body Inputs**
|
||||
|
||||
* **Multiple inputs**: tensors of different types and shapes except the one corresponding to the current iteration number. This input is marked in the port_map with attribute `purpose = "current_iteration"` and produces a scalar or 1D tensor with 1 element of `int64` or `int32` type. *Optional*.
|
||||
* **Multiple inputs**: tensors of different types and shapes except the one corresponding to the current iteration number. This input is marked in the port_map with attribute `purpose = "current_iteration"` and produces a scalar or 1D tensor with 1 element of `int64` or `int32` type. **Optional.**
|
||||
|
||||
|
||||
**Body Outputs**
|
||||
|
@ -25,7 +25,7 @@ with the shape and type from the 1 input.
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `init_value` - input tensor with constant values of any supported type. **Required**.
|
||||
* **1**: `init_value` - input tensor with constant values of any supported type. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -20,8 +20,8 @@
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T*. **Required**.
|
||||
* **2**: A tensor of type *T*. **Required**.
|
||||
* **1**: A tensor of type *T*. **Required.**
|
||||
* **2**: A tensor of type *T*. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -20,8 +20,8 @@
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T*. **Required**.
|
||||
* **2**: A tensor of type *T*. **Required**.
|
||||
* **1**: A tensor of type *T*. **Required.**
|
||||
* **2**: A tensor of type *T*. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -20,8 +20,8 @@
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: A tensor of type *T*. **Required**.
|
||||
* **2**: A tensor of type *T*. **Required**.
|
||||
* **1**: A tensor of type *T*. **Required.**
|
||||
* **2**: A tensor of type *T*. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -57,9 +57,9 @@ Two attributes, `transpose_a` and `transpose_b` specify embedded transposition f
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Tensor of type *T* with matrices A. Rank >= 1. Required.
|
||||
* **1**: Tensor of type *T* with matrices A. Rank >= 1. **Required.**
|
||||
|
||||
* **2**: Tensor of type *T* with matrices B. Rank >= 1. Required.
|
||||
* **2**: Tensor of type *T* with matrices B. Rank >= 1. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -32,9 +32,9 @@ For example, `axes_mapping = [1]` enables broadcasting of a tensor with shape `[
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data` - source tensor of any type and shape that is being broadcasted. Required.
|
||||
* **1**: `data` - source tensor of any type and shape that is being broadcasted. **Required.**
|
||||
|
||||
* **2**: `taget_shape` - 1D integer tensor describing output shape. Required.
|
||||
* **2**: `target_shape` - 1D integer tensor describing output shape. **Required.**
|
||||
|
||||
* **3**: `axes_mapping` - 1D integer tensor describing a list of axis indices, each index maps an axis from the 1st input tensor `data` to axis in the output. The index values in this tensor should be sorted, that disables on-the-fly transpositions of input `data` tensor while the broadcasting. `axes_mapping` input is optional depending on `mode` value.
|
||||
|
||||
|
@ -35,9 +35,9 @@ For example, `axes_mapping = [1]` enables broadcasting of a tensor with shape `[
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data` - source tensor of type *T* and shape that is being broadcasted. Required.
|
||||
* **1**: `data` - source tensor of type *T* and shape that is being broadcasted. **Required.**
|
||||
|
||||
* **2**: `target_shape` - 1D tensor of type *T_SHAPE* describing output shape. Required.
|
||||
* **2**: `target_shape` - 1D tensor of type *T_SHAPE* describing output shape. **Required.**
|
||||
|
||||
* **3**: `axes_mapping` - 1D tensor of type *T_SHAPE* describing a list of axis indices, each index maps an axis from the 1st input tensor `data` to axis in the output. The index values in this tensor should be sorted, that disables on-the-fly transpositions of input `data` tensor while the broadcasting. `axes_mapping` input is needed for `mode` equal to *explicit* only.
|
||||
|
||||
|
@ -47,7 +47,7 @@ If `mode = depth_first`:
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` - input tensor of type *T* with rank >= 3. **Required**.
|
||||
* **1**: `data` - input tensor of type *T* with rank >= 3. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -79,10 +79,10 @@ output = [
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Tensor of type *T*. This is a tensor of a `rank >= 1`. **Required**.
|
||||
* **1**: Tensor of type *T*. This is a tensor of a `rank >= 1`. **Required.**
|
||||
|
||||
* **2**: Tensor of type *T_IND* with the same rank as the input. All index values are expected to be within
|
||||
bounds `[0, s-1]`, where `s` is size along `axis` dimension of the `data` tensor. **Required**.
|
||||
bounds `[0, s-1]`, where `s` is size along `axis` dimension of the `data` tensor. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -107,7 +107,7 @@ output = [[2], [5], [11], [13], [19], [23]], shape = (6, 1)
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data` tensor of type *T*. This is a tensor of a rank not less than 1. Required.
|
||||
* **1**: `data` tensor of type *T*. This is a tensor of a rank not less than 1. **Required.**
|
||||
|
||||
* **2**: `indices` tensor of type *T_IND*. This is a tensor of a rank not less than 1.
|
||||
It requires that all indices from this tensor will be in a range `[0, s-1]` where `s` is corresponding dimension to which this index is applied.
|
||||
|
@ -43,13 +43,13 @@ Element data types for all input tensors should match each other.
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `step_ids` -- a tensor of shape `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]` of type *T* with indices from per each step. Required.
|
||||
* **1**: `step_ids` -- a tensor of shape `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]` of type *T* with indices from per each step. **Required.**
|
||||
|
||||
* **2**: `parent_idx` -- a tensor of shape `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]` of type *T* with parent beam indices. Required.
|
||||
* **2**: `parent_idx` -- a tensor of shape `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]` of type *T* with parent beam indices. **Required.**
|
||||
|
||||
* **3**: `max_seq_len` -- a tensor of shape `[BATCH_SIZE]` of type *T* with maximum lengths for each sequence in the batch. Required.
|
||||
* **3**: `max_seq_len` -- a tensor of shape `[BATCH_SIZE]` of type *T* with maximum lengths for each sequence in the batch. **Required.**
|
||||
|
||||
* **4**: `end_token` -- a scalar tensor of type *T* with value of the end marker in a sequence. Required.
|
||||
* **4**: `end_token` -- a scalar tensor of type *T* with value of the end marker in a sequence. **Required.**
|
||||
|
||||
|
||||
**Outputs**
|
||||
|
@ -17,11 +17,11 @@ Where `axis` is the value from the third input.
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: Tensor with arbitrary data. Required.
|
||||
* **1**: Tensor with arbitrary data. **Required.**
|
||||
|
||||
* **2**: Tensor with indices to gather. The values for indices are in the range `[0, input1[axis] - 1]`. Required.
|
||||
* **2**: Tensor with indices to gather. The values for indices are in the range `[0, input1[axis] - 1]`. **Required.**
|
||||
|
||||
* **3**: Scalar or 1D tensor *axis* is a dimension index to gather data from. For example, *axis* equal to 1 means that gathering is performed over the first dimension. Negative value means reverse indexing. Allowed values are from `[-len(input1.shape), len(input1.shape) - 1]`. Required.
|
||||
* **3**: Scalar or 1D tensor *axis* is a dimension index to gather data from. For example, *axis* equal to 1 means that gathering is performed over the first dimension. Negative value means reverse indexing. Allowed values are from `[-len(input1.shape), len(input1.shape) - 1]`. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
@ -59,4 +59,3 @@ Where `axis` is the value from the third input.
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
@ -134,17 +134,17 @@ output_shape = (2, 3)
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` tensor of type *T* with arbitrary data. **Required**.
|
||||
* **1**: `data` tensor of type *T* with arbitrary data. **Required.**
|
||||
|
||||
* **2**: `indices` tensor of type *T_IND* with indices to gather. 0D tensor (scalar) for indices is also allowed.
|
||||
The values for indices are in the range `[0, data[axis] - 1]`.
|
||||
**Required**.
|
||||
**Required.**
|
||||
|
||||
* **3**: Scalar or 1D tensor `axis` of *T_AXIS* type is a dimension index to gather data from. For example,
|
||||
*axis* equal to 1 means that gathering is performed over the first dimension. Negative `axis` means reverse indexing and
|
||||
will be normalized to value `axis = data.rank + axis`. Allowed values are from `[-len(data.shape), len(data.shape) - 1]`
|
||||
and `axis' >= batch_dims'`. Where `axis'` and `batch_dims'` stand for normalized `axis` and `batch_dims` values.
|
||||
**Required**.
|
||||
**Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -144,18 +144,18 @@ output = [1, 4, 5]
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` tensor of type *T* with arbitrary data. **Required**.
|
||||
* **1**: `data` tensor of type *T* with arbitrary data. **Required.**
|
||||
|
||||
* **2**: `indices` tensor of type *T_IND* with indices to gather. 0D tensor (scalar) for indices is also allowed.
|
||||
The values for indices are in the range `[-data[axis], data[axis] - 1]`.
|
||||
Negative values of indices indicate reverse indexing from `data[axis]`.
|
||||
**Required**.
|
||||
**Required.**
|
||||
|
||||
* **3**: Scalar or 1D tensor `axis` of *T_AXIS* type is a dimension index to gather data from. For example,
|
||||
*axis* equal to 1 means that gathering is performed over the first dimension. Negative `axis` means reverse indexing and
|
||||
will be normalized to value `axis = data.rank + axis`. Allowed values are from `[-len(data.shape), len(data.shape) - 1]`
|
||||
and `axis' >= batch_dims'`. Where `axis'` and `batch_dims'` stand for normalized `axis` and `batch_dims` values.
|
||||
**Required**.
|
||||
**Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -73,13 +73,13 @@ OUTPUT =
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` tensor of arbitrary shape and type *T*. Required.
|
||||
* **1**: `data` tensor of arbitrary shape and type *T*. **Required.**
|
||||
|
||||
* **2**: `pads_begin` 1D tensor of type *T_INT*. Number of elements matches the number of indices in *data* attribute. Specifies the number of padding elements at the beginning of each axis. Required.
|
||||
* **2**: `pads_begin` 1D tensor of type *T_INT*. Number of elements matches the number of indices in *data* attribute. Specifies the number of padding elements at the beginning of each axis. **Required.**
|
||||
|
||||
* **3**: `pads_end` 1D tensor of type *T_INT*. Number of elements matches the number of indices in *data* attribute. Specifies the number of padding elements at the ending of each axis. Required.
|
||||
* **3**: `pads_end` 1D tensor of type *T_INT*. Number of elements matches the number of indices in *data* attribute. Specifies the number of padding elements at the ending of each axis. **Required.**
|
||||
|
||||
* **4**: `pad_value` scalar tensor of type *T*. Used with the `pad_mode = "constant"` only. All new elements are populated with this value or with 0 if input not provided. Shouldn't be set for other `pad_mode` values. Optional.
|
||||
* **4**: `pad_value` scalar tensor of type *T*. Used with the `pad_mode = "constant"` only. All new elements are populated with this value or with 0 if input not provided. Shouldn't be set for other `pad_mode` values. **Optional.**
|
||||
|
||||
|
||||
**Outputs**
|
||||
|
@ -28,9 +28,9 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: tensor with input data to reverse. Required.
|
||||
* **1**: tensor with input data to reverse. **Required.**
|
||||
|
||||
* **2**: 1D tensor populated with integers with sequence lengths in the 1st input tensor. Required.
|
||||
* **2**: 1D tensor populated with integers with sequence lengths in the 1st input tensor. **Required.**
|
||||
|
||||
**Example**
|
||||
|
||||
|
@ -24,17 +24,17 @@ output[i][j][indices[i][j][k]] = updates[i][j][k] if axis = 2
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data` tensor of arbitrary rank `r` and of type *T*. Required.
|
||||
* **1**: `data` tensor of arbitrary rank `r` and of type *T*. **Required.**
|
||||
|
||||
* **2**: `indices` tensor with indices of type *T_IND*. The rank of the tensor is equal to the rank of `data` tensor.
|
||||
All index values are expected to be within bounds `[0, s - 1]` along axis of size `s`. If multiple indices point to the
|
||||
same output location then the order of updating the values is undefined. If an index points to non-existing output
|
||||
tensor element or is negative then exception is raised. Required.
|
||||
tensor element or is negative then an exception is raised. **Required.**
|
||||
|
||||
* **3**: `updates` tensor of shape equal to the shape of `indices` tensor and of type *T*. Required.
|
||||
* **3**: `updates` tensor of shape equal to the shape of `indices` tensor and of type *T*. **Required.**
|
||||
|
||||
* **4**: `axis` tensor with scalar or 1D tensor with one element of type *T_AXIS* specifying axis for scatter.
|
||||
The value can be in range `[-r, r - 1]` where `r` is the rank of `data`. Required.
|
||||
The value can be in range `[-r, r - 1]` where `r` is the rank of `data`. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -44,11 +44,11 @@ output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data` tensor of arbitrary rank `r` >= 1 and of type *T*. Required.
|
||||
* **1**: `data` tensor of arbitrary rank `r` >= 1 and of type *T*. **Required.**
|
||||
|
||||
* **2**: `indices` tensor with indices of arbitrary rank `q` >= 1 and of type *T_IND*. All index values `i_j` in index entry `(i_0, i_1, ...,i_k)` (where `k = indices.shape[-1]`) must be within bounds `[0, s_j - 1]` where `s_j = data.shape[j]`. `k` must be at most `r`. Required.
|
||||
* **2**: `indices` tensor with indices of arbitrary rank `q` >= 1 and of type *T_IND*. All index values `i_j` in index entry `(i_0, i_1, ...,i_k)` (where `k = indices.shape[-1]`) must be within bounds `[0, s_j - 1]` where `s_j = data.shape[j]`. `k` must be at most `r`. **Required.**
|
||||
|
||||
* **3**: `updates` tensor of rank `r - indices.shape[-1] + q - 1` of type *T*. If expected `updates` rank is 0D it can be a tensor with single element. Required.
|
||||
* **3**: `updates` tensor of rank `r - indices.shape[-1] + q - 1` of type *T*. If expected `updates` rank is 0D it can be a tensor with single element. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -31,17 +31,17 @@ but allows scattering for the arbitrary axis.
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data` tensor of arbitrary rank `r` and of type *T*. Required.
|
||||
* **1**: `data` tensor of arbitrary rank `r` and of type *T*. **Required.**
|
||||
|
||||
* **2**: `indices` tensor with indices of type *T_IND*.
|
||||
All index values are expected to be within bounds `[0, s - 1]` along axis of size `s`. If multiple indices point to the
|
||||
same output location then the order of updating the values is undefined. If an index points to non-existing output
|
||||
tensor element or is negative then an exception is raised. Required.
|
||||
tensor element or is negative then an exception is raised. **Required.**
|
||||
|
||||
* **3**: `updates` tensor of type *T*. Required.
|
||||
* **3**: `updates` tensor of type *T*. **Required.**
|
||||
|
||||
* **4**: `axis` tensor with scalar or 1D tensor with one element of type *T_AXIS* specifying axis for scatter.
|
||||
The value can be in range `[-r, r - 1]` where `r` is the rank of `data`. Required.
|
||||
The value can be in range `[-r, r - 1]` where `r` is the rank of `data`. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -41,7 +41,7 @@ where `group` is the layer attribute described below.
|
||||
* **Range of values**: an integer number in the range `[-rank(data_shape), rank(data_shape) - 1]`
|
||||
* **Type**: `int`
|
||||
* **Default value**: 1
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *group*
|
||||
|
||||
@ -49,7 +49,7 @@ where `group` is the layer attribute described below.
|
||||
* **Range of values**: a positive integer in the range `[1, data_shape[axis]]`
|
||||
* **Type**: `int`
|
||||
* **Default value**: 1
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
|
@ -23,7 +23,7 @@
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: Input tensor with element of any floating point type and `2 <= rank <=4`. Required.
|
||||
* **1**: Input tensor with elements of any floating point type and `2 <= rank <= 4`. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -48,7 +48,7 @@ o_{i}=\frac{o_{i}}{\sum \sqrt {o_{k}^2}+\epsilon}
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: 4D or 5D input tensor of any floating point type. Required.
|
||||
* **1**: 4D or 5D input tensor of any floating point type. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -53,9 +53,9 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` - Input tensor to be normalized. Type *T*. Required.
|
||||
* **1**: `data` - Input tensor to be normalized. Type *T*. **Required.**
|
||||
|
||||
* **2**: `axes` - 1D tensor which specifies indices of dimensions in `data` that define normalization slices. Allowed range of axes is `[-r; r-1]` where `r = rank(data)`, the order can be not sorted. Negative value means counting dimensions from the back. Type *T_IND*. Required.
|
||||
* **2**: `axes` - 1D tensor which specifies indices of dimensions in `data` that define normalization slices. Allowed range of axes is `[-r; r-1]` where `r = rank(data)`, the order can be not sorted. Negative value means counting dimensions from the back. Type *T_IND*. **Required.**
|
||||
|
||||
**Outputs**
|
||||
|
||||
|
@ -28,8 +28,8 @@ Output(i,j,k) = \frac{Input[d_{start}:d_{end}, h_{start}:h_{end}, w_{start}:w_{e
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 3D, 4D, or 5D input tensor of shape `[N, C, H]`, `[N, C, H, W]` or `[N, C, D, H, W]` and type *T*. Required.
|
||||
* **2**: 1D tensor describing output shape for spatial dimensions. Can be `[H_out]` for 3D input, `[H_out, W_out]` for 4D input, `[D_out, H_out, W_out]` for 5D input and of type *T_SHAPE*. Required.
|
||||
* **1**: 3D, 4D, or 5D input tensor of shape `[N, C, H]`, `[N, C, H, W]` or `[N, C, D, H, W]` and type *T*. **Required.**
|
||||
* **2**: 1D tensor describing output shape for spatial dimensions. Can be `[H_out]` for 3D input, `[H_out, W_out]` for 4D input, `[D_out, H_out, W_out]` for 5D input and of type *T_SHAPE*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -34,12 +34,12 @@ Output(i,j,k) = max(Input[d_{start}:d_{end}, h_{start}:h_{end}, w_{start}:w_{end
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 3D, 4D, or 5D input tensor of shape `[N, C, H]`, `[N, C, H, W]` or `[N, C, D, H, W]` and type *T*. Required.
|
||||
* **2**: 1D tensor describing output shape for spatial dimensions. Can be `[H_out]` for 3D input, `[H_out, W_out]` for 4D input, `[D_out, H_out, W_out]` for 5D input and of type *T_SHAPE*. Required.
|
||||
* **1**: 3D, 4D, or 5D input tensor of shape `[N, C, H]`, `[N, C, H, W]` or `[N, C, D, H, W]` and type *T*. **Required.**
|
||||
* **2**: 1D tensor describing output shape for spatial dimensions. Can be `[H_out]` for 3D input, `[H_out, W_out]` for 4D input, `[D_out, H_out, W_out]` for 5D input and of type *T_SHAPE*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -74,7 +74,7 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 3D, 4D or 5D input tensor. Required.
|
||||
* **1**: 3D, 4D or 5D input tensor. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
* **1**: Input shape can be either `[N,C,H]`, `[N,C,H,W]` or `[N,C,H,W,D]`. Then the corresponding output shape is `[N,C,H_out]`, `[N,C,H_out,W_out]` or `[N,C,H_out,W_out,D_out]`.
|
||||
|
@ -63,7 +63,7 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 3D, 4D or 5D input tensor of type *T*. Required.
|
||||
* **1**: 3D, 4D or 5D input tensor of type *T*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
* **1**: Input shape can be either `[N, C, H]`, `[N, C, H, W]` or `[N, C, H, W, D]`. Then the corresponding output shape will be `[N, C, H_out]`, `[N, C, H_out, W_out]` or `[N, C, H_out, W_out, D_out]`. Output tensor has the same data type as input tensor.
|
||||
|
@ -36,29 +36,34 @@ else:
|
||||
* **Description**: specifies rules used for auto-broadcasting of input tensors.
|
||||
* **Range of values**:
|
||||
* *none* - no auto-broadcasting is allowed, all input shapes should match
|
||||
* *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in <a href="https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md">ONNX docs</a>
|
||||
* *pdpd* - PaddlePaddle-style implicit broadcasting.
|
||||
* *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
|
||||
* *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
|
||||
* **Type**: string
|
||||
* **Default value**: "numpy"
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `X` - multidimensional input tensor of floating type to be quantized. Required.
|
||||
* **1**: `X` - tensor of type *T_F* and arbitrary shape. **Required.**
|
||||
|
||||
* **2**: `input_low` - minimum limit for input value. The shape must be broadcastable to the shape of *X*. Required.
|
||||
* **2**: `input_low` - tensor of type *T_F* with minimum limit for input value. The shape must be broadcastable to the shape of *X*. **Required.**
|
||||
|
||||
* **3**: `input_high` - maximum limit for input value. Can be the same as `input_low` for binarization. The shape must be broadcastable to the shape of *X*. Required.
|
||||
* **3**: `input_high` - tensor of type *T_F* with maximum limit for input value. Can be the same as `input_low` for binarization. The shape must be broadcastable to the shape of *X*. **Required.**
|
||||
|
||||
* **4**: `output_low` - minimum quantized value. The shape must be broadcastable to the shape of *X*. Required.
|
||||
* **4**: `output_low` - tensor of type *T_F* with minimum quantized value. The shape must be broadcastable to the shape of *X*. **Required.**
|
||||
|
||||
* **5**: `output_high` - maximum quantized value. The shape must be broadcastable to the of *X*. Required.
|
||||
* **5**: `output_high` - tensor of type *T_F* with maximum quantized value. The shape must be broadcastable to the shape of *X*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
* **1**: `Y` - resulting tensor with shape and type matching the 1st input tensor *X*.
|
||||
* **1**: output tensor of type *T_F* with shape and type matching the 1st input tensor *X*.
|
||||
|
||||
**Types**
|
||||
|
||||
* *T_F*: any supported floating point type.
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<layer … type="FakeQuantize"…>
|
||||
<data levels="2"/>
|
||||
|
@ -29,7 +29,7 @@ The main difference between [CTCGreedyDecoder](CTCGreedyDecoder_1.md) and CTCGre
|
||||
* **Range of values**: true or false
|
||||
* **Type**: `boolean`
|
||||
* **Default value**: true
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *classes_index_type*
|
||||
|
||||
@ -37,7 +37,7 @@ The main difference between [CTCGreedyDecoder](CTCGreedyDecoder_1.md) and CTCGre
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i32"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *sequence_length_type*
|
||||
|
||||
@ -45,7 +45,7 @@ The main difference between [CTCGreedyDecoder](CTCGreedyDecoder_1.md) and CTCGre
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i32"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**
|
||||
|
||||
@ -53,7 +53,7 @@ The main difference between [CTCGreedyDecoder](CTCGreedyDecoder_1.md) and CTCGre
|
||||
|
||||
* **2**: `sequence_length` - input tensor of type *T_I* of shape `[N]` with sequence lengths. The values of sequence length must be less or equal to `T`. **Required.**
|
||||
|
||||
* **3**: `blank_index` - scalar or 1D tensor with 1 element of type *T_I*. Specifies the class index to use for the blank class. Regardless of the value of `merge_repeated` attribute, if the output index for a given batch and time step corresponds to the `blank_index`, no new element is emitted. Default value is `C-1`. **Optional**.
|
||||
* **3**: `blank_index` - scalar or 1D tensor with 1 element of type *T_I*. Specifies the class index to use for the blank class. Regardless of the value of `merge_repeated` attribute, if the output index for a given batch and time step corresponds to the `blank_index`, no new element is emitted. Default value is `C-1`. **Optional.**
|
||||
|
||||
**Output**
|
||||
|
||||
|
@ -80,15 +80,15 @@ Having log-probabilities for aligned paths, log of summed up probabilities for t
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `logits` - Input tensor with a batch of sequences of logits. Type of elements is *T_F*. Shape of the tensor is `[N, T, C]`, where `N` is the batch size, `T` is the maximum sequence length and `C` is the number of classes including the blank. Required.
|
||||
* **1**: `logits` - Input tensor with a batch of sequences of logits. Type of elements is *T_F*. Shape of the tensor is `[N, T, C]`, where `N` is the batch size, `T` is the maximum sequence length and `C` is the number of classes including the blank. **Required.**
|
||||
|
||||
* **2**: `logit_length` - 1D input tensor of type *T1* and of a shape `[N]`. The tensor must consist of non-negative values not greater than `T`. Lengths of input sequences of logits `logits[i,:,:]`. Required.
|
||||
* **2**: `logit_length` - 1D input tensor of type *T1* and of a shape `[N]`. The tensor must consist of non-negative values not greater than `T`. Lengths of input sequences of logits `logits[i,:,:]`. **Required.**
|
||||
|
||||
* **3**: `labels` - 2D tensor with shape `[N, T]` of type *T2*. A length of a target sequence `labels[i,:]` is equal to `label_length[i]` and must contain of integers from a range `[0; C-1]` except `blank_index`. Required.
|
||||
* **3**: `labels` - 2D tensor with shape `[N, T]` of type *T2*. A length of a target sequence `labels[i,:]` is equal to `label_length[i]` and must consist of integers from a range `[0; C-1]` except `blank_index`. **Required.**
|
||||
|
||||
* **4**: `label_length` - 1D tensor of type *T1* and of a shape `[N]`. The tensor must consist of non-negative values not greater than `T` and `label_length[i] <= logit_length[i]` for all possible `i`. Required.
|
||||
* **4**: `label_length` - 1D tensor of type *T1* and of a shape `[N]`. The tensor must consist of non-negative values not greater than `T` and `label_length[i] <= logit_length[i]` for all possible `i`. **Required.**
|
||||
|
||||
* **5**: `blank_index` - Scalar of type *T2*. Set the class index to use for the blank label. Default value is `C-1`. Optional.
|
||||
* **5**: `blank_index` - Scalar of type *T2*. Set the class index to use for the blank label. Default value is `C-1`. **Optional.**
|
||||
|
||||
**Output**
|
||||
|
||||
|
@ -31,10 +31,10 @@ The types of input scalars `on_value` and `off_value` should match and be equal
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `indices`: input tensor of type *T1* with non-negative indices, behavior for negative indices is undefined. Can be 0D. Required.
|
||||
* **2**: `depth`: positive scalar (0D tensor) of type *T1* that specifies the number of classes and thus the size of the one-hot dimension. Required.
|
||||
* **3**: `on_value`: scalar (0D tensor) of type *T2* that fills the locations in output tensor specified in `indices`. Required.
|
||||
* **4**: `off_value`: scalar (0D tensor) of type *T2* that fills the locations not represented in `indices`. Required.
|
||||
* **1**: `indices`: input tensor of type *T1* with non-negative indices, behavior for negative indices is undefined. Can be 0D. **Required.**
|
||||
* **2**: `depth`: positive scalar (0D tensor) of type *T1* that specifies the number of classes and thus the size of the one-hot dimension. **Required.**
|
||||
* **3**: `on_value`: scalar (0D tensor) of type *T2* that fills the locations in output tensor specified in `indices`. **Required.**
|
||||
* **4**: `off_value`: scalar (0D tensor) of type *T2* that fills the locations not represented in `indices`. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -25,9 +25,9 @@ If `special_zero` is set to `true` index of `0` cannot be larger than the rank o
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data` a tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: `data` a tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
* **2**: `shape` 1D tensor of type *T_SHAPE* describing output shape. **Required**.
|
||||
* **2**: `shape` 1D tensor of type *T_SHAPE* describing output shape. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Arbitrary input tensor. Required.
|
||||
* **1**: Arbitrary input tensor. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -14,11 +14,11 @@
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Arbitrary input tensor of type *T*. Required.
|
||||
* **1**: Arbitrary input tensor of type *T*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -14,9 +14,9 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Multidimensional input tensor of type *T*. **Required**.
|
||||
* **1**: Multidimensional input tensor of type *T*. **Required.**
|
||||
|
||||
* **2**: Scalar or 1D tensor of type *T_INT* with indices of dimensions to squeeze. Values could be negative (have to be from range `[-R, R-1]`, where `R` is the rank of the first input). **Optional**.
|
||||
* **2**: Scalar or 1D tensor of type *T_INT* with indices of dimensions to squeeze. Values could be negative (have to be from range `[-R, R-1]`, where `R` is the rank of the first input). **Optional.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -10,9 +10,9 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Tensor of type *T* and arbitrary shape. **Required**.
|
||||
* **1**: Tensor of type *T* and arbitrary shape. **Required.**
|
||||
|
||||
* **2**: Scalar or 1D tensor of type *T_INT* with indices of dimensions to unsqueeze. Values could be negative (have to be from range `[-R, R-1]`, where `R` is the rank of the output). **Required**.
|
||||
* **2**: Scalar or 1D tensor of type *T_INT* with indices of dimensions to unsqueeze. Values could be negative (have to be from range `[-R, R-1]`, where `R` is the rank of the output). **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -12,8 +12,8 @@
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` - Input tensor of type *T* with data for the DFT transformation. Type of elements is any supported floating-point type. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form `[D_0, D_1, ..., D_{N-1}, 2]`, representing the real and imaginary components of complex numbers in `[:, ..., :, 0]` and in `[:, ..., :, 1]` correspondingly. Required.
|
||||
* **2**: `axes` - 1D tensor of type *T_IND* specifying dimension indices where DFT is applied, and `axes` is any unordered list of indices of different dimensions of input tensor, for example, `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`, `[-3, 0, -2]`. These indices should be integers from `-(r - 1)` to `(r - 2)` inclusively, where `r = rank(data)`. A negative axis `a` is interpreted as an axis `r - 1 + a`. Other dimensions do not change. The order of elements in `axes` attribute matters, and is mapped directly to elements in the third input `signal_size`. Required.
|
||||
* **1**: `data` - Input tensor of type *T* with data for the DFT transformation. Type of elements is any supported floating-point type. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form `[D_0, D_1, ..., D_{N-1}, 2]`, representing the real and imaginary components of complex numbers in `[:, ..., :, 0]` and in `[:, ..., :, 1]` correspondingly. **Required.**
|
||||
* **2**: `axes` - 1D tensor of type *T_IND* specifying dimension indices where DFT is applied, and `axes` is any unordered list of indices of different dimensions of input tensor, for example, `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`, `[-3, 0, -2]`. These indices should be integers from `-(r - 1)` to `(r - 2)` inclusively, where `r = rank(data)`. A negative axis `a` is interpreted as an axis `r - 1 + a`. Other dimensions do not change. The order of elements in `axes` attribute matters, and is mapped directly to elements in the third input `signal_size`. **Required.**
|
||||
* **NOTE**: The following constraint must be satisfied: `rank(data) >= len(axes) + 1 and input_shape[-1] == 2 and (rank(data) - 1) not in axes and (-1) not in axes`.
|
||||
* **3**: `signal_size` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input `axes`. If `signal_size[i] == -1`, then DFT is calculated for full size of the axis `axes[i]`. If `signal_size[i] > input_shape[: r - 1][axes[i]]`, then input data are zero-padded with respect to the axis `axes[i]` at the end. Finally, if `signal_size[i] < input_shape[: r - 1][axes[i]]`, then input data are trimmed with respect to the axis `axes[i]`. More precisely, if `signal_size[i] < input_shape[: r - 1][axes[i]]`, the slice `0: signal_size[i]` of the axis `axes[i]` is considered. Optional, with default value `[input_shape[: r - 1][a] for a in axes]`.
|
||||
* **NOTE**: If the input `signal_size` is specified, the size of `signal_size` must be the same as the size of `axes`.
|
||||
|
@ -12,8 +12,8 @@
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` - Input tensor of type *T* with data for the IDFT transformation. Type of elements is any supported floating-point type. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form `[D_0, D_1, ..., D_{N-1}, 2]`, representing the real and imaginary components of complex numbers in `[:, ..., :, 0]` and in `[:, ..., :, 1]` correspondingly. Required.
|
||||
* **2**: `axes` - 1D tensor of type *T_IND* specifying dimension indices where IDFT is applied, and `axes` is any unordered list of indices of different dimensions of input tensor, for example, `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`, `[-3, 0, -2]`. These indices should be integers from `-(r - 1)` to `(r - 2)` inclusively, where `r = rank(data)`. A negative axis `a` is interpreted as an axis `r - 1 + a`. Other dimensions do not change. The order of elements in `axes` attribute matters, and is mapped directly to elements in the third input `signal_size`. Required.
|
||||
* **1**: `data` - Input tensor of type *T* with data for the IDFT transformation. Type of elements is any supported floating-point type. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form `[D_0, D_1, ..., D_{N-1}, 2]`, representing the real and imaginary components of complex numbers in `[:, ..., :, 0]` and in `[:, ..., :, 1]` correspondingly. **Required.**
|
||||
* **2**: `axes` - 1D tensor of type *T_IND* specifying dimension indices where IDFT is applied, and `axes` is any unordered list of indices of different dimensions of input tensor, for example, `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`, `[-3, 0, -2]`. These indices should be integers from `-(r - 1)` to `(r - 2)` inclusively, where `r = rank(data)`. A negative axis `a` is interpreted as an axis `r - 1 + a`. Other dimensions do not change. The order of elements in `axes` attribute matters, and is mapped directly to elements in the third input `signal_size`. **Required.**
|
||||
* **NOTE**: The following constraint must be satisfied: `rank(data) >= len(axes) + 1 and input_shape[-1] == 2 and (rank(data) - 1) not in axes and (-1) not in axes`.
|
||||
* **3**: `signal_size` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input `axes`. If `signal_size[i] == -1`, then IDFT is calculated for full size of the axis `axes[i]`. If `signal_size[i] > input_shape[: r - 1][axes[i]]`, then input data are zero-padded with respect to the axis `axes[i]` at the end. Finally, if `signal_size[i] < input_shape[: r - 1][axes[i]]`, then input data are trimmed with respect to the axis `axes[i]`. More precisely, if `signal_size[i] < input_shape[: r - 1][axes[i]]`, the slice `0: signal_size[i]` of the axis `axes[i]` is considered. Optional, with default value `[input_shape[: r - 1][a] for a in axes]`.
|
||||
* **NOTE**: If the input `signal_size` is specified, then the size of `signal_size` must be the same as the size of `axes`.
|
||||
|
@ -32,7 +32,7 @@ The Matrix NMS algorithm is described below:
|
||||
* *none* - do not guarantee the order.
|
||||
* **Type**: `string`
|
||||
* **Default value**: `none`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *sort_result_across_batch*
|
||||
|
||||
@ -42,7 +42,7 @@ The Matrix NMS algorithm is described below:
|
||||
* *false* - do not sort selected boxes across batches (boxes are sorted per batch element).
|
||||
* **Type**: boolean
|
||||
* **Default value**: false
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *output_type*
|
||||
|
||||
@ -50,7 +50,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: `i64` or `i32`
|
||||
* **Type**: `string`
|
||||
* **Default value**: `i64`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *score_threshold*
|
||||
|
||||
@ -58,7 +58,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: a floating-point number
|
||||
* **Type**: `float`
|
||||
* **Default value**: `0`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *nms_top_k*
|
||||
|
||||
@ -66,7 +66,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all boxes
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *keep_top_k*
|
||||
|
||||
@ -74,7 +74,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all boxes
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *background_class*
|
||||
|
||||
@ -82,7 +82,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all classes
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *normalized*
|
||||
|
||||
@ -92,7 +92,7 @@ The Matrix NMS algorithm is described below:
|
||||
* *false* - the box coordinates are not normalized.
|
||||
* **Type**: boolean
|
||||
* **Default value**: true
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *decay_function*
|
||||
|
||||
@ -100,7 +100,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: `gaussian`, `linear`
|
||||
* **Type**: `string`
|
||||
* **Default value**: `linear`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *gaussian_sigma*
|
||||
|
||||
@ -108,7 +108,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: a floating-point number
|
||||
* **Type**: `float`
|
||||
* **Default value**: `2.0`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *post_threshold*
|
||||
|
||||
@ -116,7 +116,7 @@ The Matrix NMS algorithm is described below:
|
||||
* **Range of values**: a floating-point number
|
||||
* **Type**: `float`
|
||||
* **Default value**: `0`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
|
@ -33,7 +33,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* *none* - do not guarantee the order.
|
||||
* **Type**: `string`
|
||||
* **Default value**: `none`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *sort_result_across_batch*
|
||||
|
||||
@ -43,7 +43,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* *false* - do not sort selected boxes across batches (boxes are sorted per batch element).
|
||||
* **Type**: boolean
|
||||
* **Default value**: false
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *output_type*
|
||||
|
||||
@ -51,7 +51,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* **Range of values**: `i64` or `i32`
|
||||
* **Type**: `string`
|
||||
* **Default value**: `i64`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *iou_threshold*
|
||||
|
||||
@ -59,7 +59,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* **Range of values**: a floating-point number
|
||||
* **Type**: `float`
|
||||
* **Default value**: `0`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *score_threshold*
|
||||
|
||||
@ -67,7 +67,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* **Range of values**: a floating-point number
|
||||
* **Type**: `float`
|
||||
* **Default value**: `0`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *nms_top_k*
|
||||
|
||||
@ -75,7 +75,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all boxes
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *keep_top_k*
|
||||
|
||||
@ -83,7 +83,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all boxes
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *background_class*
|
||||
|
||||
@ -91,7 +91,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all classes.
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *normalized*
|
||||
|
||||
@ -101,7 +101,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* *false* - the box coordinates are not normalized.
|
||||
* **Type**: boolean
|
||||
* **Default value**: true
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
* *nms_eta*
|
||||
|
||||
@ -109,7 +109,7 @@ Boxes of `background_class` are skipped and thus eliminated.
|
||||
* **Range of values**: a floating-point number in the closed range `[0, 1.0]`.
|
||||
* **Type**: `float`
|
||||
* **Default value**: `1.0`
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
|
@ -41,9 +41,9 @@ class must not exceed `max_output_boxes_per_class`.
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `boxes` - floating point tensor of shape `[num_batches, num_boxes, 4]` with box coordinates. Required.
|
||||
* **1**: `boxes` - floating point tensor of shape `[num_batches, num_boxes, 4]` with box coordinates. **Required.**
|
||||
|
||||
* **2**: `scores` - floating point tensor of shape `[num_batches, num_classes, num_boxes]` with box scores. Required.
|
||||
* **2**: `scores` - floating point tensor of shape `[num_batches, num_classes, num_boxes]` with box scores. **Required.**
|
||||
|
||||
* **3**: `max_output_boxes_per_class` - integer scalar tensor specifying maximum number of boxes to be selected per class. Optional with default value 0 meaning select no boxes.
|
||||
|
||||
|
@ -45,13 +45,13 @@ class must not exceed `max_output_boxes_per_class`.
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. Required.
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. **Required.**
|
||||
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. Required.
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. **Required.**
|
||||
|
||||
* **3**: `max_output_boxes_per_class` - scalar tensor of type *T_MAX_BOXES* specifying maximum number of boxes to be selected per class. Optional with default value 0 meaning select no boxes.
|
||||
|
||||
|
@ -45,13 +45,13 @@ class must not exceed `max_output_boxes_per_class`.
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. Required.
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. **Required.**
|
||||
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. Required.
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. **Required.**
|
||||
|
||||
* **3**: `max_output_boxes_per_class` - scalar tensor of type *T_MAX_BOXES* specifying maximum number of boxes to be selected per class. Optional with default value 0 meaning select no boxes.
|
||||
|
||||
|
@ -50,13 +50,13 @@ class must not exceed `max_output_boxes_per_class`.
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. Required.
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. **Required.**
|
||||
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. Required.
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. **Required.**
|
||||
|
||||
* **3**: `max_output_boxes_per_class` - scalar or 1D tensor with 1 element of type *T_MAX_BOXES* specifying maximum number of boxes to be selected per class. Optional with default value 0 meaning select no boxes.
|
||||
|
||||
|
@ -35,11 +35,11 @@
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i32"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: Arbitrary tensor. Required.
|
||||
* **1**: Arbitrary tensor. **Required.**
|
||||
|
||||
* **2**: *k* -- scalar specifies how many maximum/minimum elements should be computed
|
||||
|
||||
|
@ -35,14 +35,14 @@
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i32"
|
||||
* **Required**: *No*
|
||||
* **Required**: *no*
|
||||
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: tensor of arbitrary rank of type *T*. Required.
|
||||
* **1**: tensor of arbitrary rank of type *T*. **Required.**
|
||||
|
||||
* **2**: *k* -- scalar of any integer type specifies how many maximum/minimum elements should be computed. Required
|
||||
* **2**: *k* -- scalar of any integer type specifies how many maximum/minimum elements should be computed. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
|
@ -10,13 +10,13 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `emb_table` tensor containing the embedding lookup table of the module of shape `[num_emb, emb_dim1, emb_dim2, ...]` and of type *T*. Required.
|
||||
* **1**: `emb_table` tensor containing the embedding lookup table of the module of shape `[num_emb, emb_dim1, emb_dim2, ...]` and of type *T*. **Required.**
|
||||
|
||||
* **2**: `indices` tensor of shape `[num_indices]` and of type *T_IND*. Required.
|
||||
* **2**: `indices` tensor of shape `[num_indices]` and of type *T_IND*. **Required.**
|
||||
|
||||
* **3**: `offsets` tensor of shape `[batch]` and of type *T_IND* containing the starting index positions of each "bag" in `indices`. Required.
|
||||
* **3**: `offsets` tensor of shape `[batch]` and of type *T_IND* containing the starting index positions of each "bag" in `indices`. **Required.**
|
||||
|
||||
* **4**: `default_index` scalar of type *T_IND* containing default index in embedding table to fill empty "bags". If not provided empty "bags" are filled with zeros. Optional.
|
||||
* **4**: `default_index` scalar of type *T_IND* containing default index in embedding table to fill empty "bags". If not provided empty "bags" are filled with zeros. **Optional.**
|
||||
|
||||
* **5**: `per_sample_weights` tensor of the same shape as `indices` and of type *T*. Each value in this tensor is multiplied with each value pooled from embedding table for each index. Optional, default is tensor of ones.
|
||||
|
||||
|
@ -10,9 +10,9 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `emb_table` tensor containing the embedding lookup table of the module of shape `[num_emb, emb_dim1, emb_dim2, ...]` and of type *T*. Required.
|
||||
* **1**: `emb_table` tensor containing the embedding lookup table of the module of shape `[num_emb, emb_dim1, emb_dim2, ...]` and of type *T*. **Required.**
|
||||
|
||||
* **2**: `indices` tensor of shape `[batch, indices_per_bag]` and of type *T_IND*. Required.
|
||||
* **2**: `indices` tensor of shape `[batch, indices_per_bag]` and of type *T_IND*. **Required.**
|
||||
|
||||
* **3**: `per_sample_weights` tensor of the same shape as `indices` and of type *T*. Each value in this tensor is multiplied with each value pooled from embedding table for each index. Optional, default is tensor of ones.
|
||||
|
||||
|
@ -10,15 +10,15 @@
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `emb_table` tensor containing the embedding lookup table of the module of shape `[num_emb, emb_dim1, emb_dim2, ...]` and of type *T*. Required.
|
||||
* **1**: `emb_table` tensor containing the embedding lookup table of the module of shape `[num_emb, emb_dim1, emb_dim2, ...]` and of type *T*. **Required.**
|
||||
|
||||
* **2**: `indices` tensor of shape `[num_indices]` and of type *T_IND*. Required.
|
||||
* **2**: `indices` tensor of shape `[num_indices]` and of type *T_IND*. **Required.**
|
||||
|
||||
* **3**: `segment_ids` tensor of shape `[num_indices]` and of type *T_IND* with indices into the output Tensor. Values should be sorted and can be repeated. Required.
|
||||
* **3**: `segment_ids` tensor of shape `[num_indices]` and of type *T_IND* with indices into the output Tensor. Values should be sorted and can be repeated. **Required.**
|
||||
|
||||
* **4**: `num_segments` scalar of type *T_IND* indicating the number of segments. Required.
|
||||
* **4**: `num_segments` scalar of type *T_IND* indicating the number of segments. **Required.**
|
||||
|
||||
* **5**: `default_index` scalar of type *T_IND* containing default index in embedding table to fill empty segments. If not provided empty segments are filled with zeros. Optional.
|
||||
* **5**: `default_index` scalar of type *T_IND* containing default index in embedding table to fill empty segments. If not provided empty segments are filled with zeros. **Optional.**
|
||||
|
||||
* **6**: `per_sample_weights` tensor of the same shape as `indices` and of type *T*. Each value in this tensor is multiplied with each value pooled from embedding table for each index. Optional, default is tensor of ones.
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
||||
#include <ngraph/opsets/opset7.hpp>
|
||||
#include <ngraph/pattern/op/or.hpp>
|
||||
#include <ngraph/pattern/op/wrap_type.hpp>
|
||||
|
||||
#include <ngraph/rt_info.hpp>
|
||||
#include "backend/gna_limitations.hpp"
|
||||
|
||||
using namespace GNAPluginNS;
|
||||
@ -54,24 +54,29 @@ static bool Convert(std::shared_ptr<ngraph::Node> conv,
|
||||
auto split_node = std::make_shared<ngraph::opset7::VariadicSplit>(conv->input_value(0),
|
||||
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({1}), std::vector<int64_t>{width_axis}),
|
||||
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({split_sizes.size()}), split_sizes));
|
||||
ngraph::copy_runtime_info(conv, split_node);
|
||||
split_node->set_friendly_name(conv->get_friendly_name() + "/split");
|
||||
ngraph::OutputVector convOutputs;
|
||||
std::shared_ptr<ngraph::Node> root_node = fq ? fq : (add ? add : conv);
|
||||
for (int i = 0; i < split_sizes.size(); ++i) {
|
||||
std::shared_ptr<ngraph::Node> output = conv->clone_with_new_inputs({split_node->output(i), conv->input_value(1)});
|
||||
ngraph::copy_runtime_info(split_node, output);
|
||||
output->set_friendly_name(conv->get_friendly_name() + "_" + std::to_string(i));
|
||||
if (bias) {
|
||||
output = std::make_shared<ngraph::opset7::Add>(output, bias);
|
||||
ngraph::copy_runtime_info(conv, output);
|
||||
}
|
||||
|
||||
if (fq) {
|
||||
output = fq->clone_with_new_inputs({output, fq->input_value(1), fq->input_value(2),
|
||||
fq->input_value(3), fq->input_value(4)});
|
||||
ngraph::copy_runtime_info(fq, output);
|
||||
}
|
||||
convOutputs.push_back(output);
|
||||
}
|
||||
|
||||
auto concat = std::make_shared<ngraph::opset7::Concat>(convOutputs, width_axis);
|
||||
ngraph::copy_runtime_info(conv, concat);
|
||||
concat->set_friendly_name(conv->get_friendly_name());
|
||||
ngraph::replace_node(root_node, concat);
|
||||
return true;
|
||||
|
@ -53,7 +53,7 @@ function(add_common_target TARGET_NAME STATIC_IE)
|
||||
openvino_developer_export_targets(COMPONENT inference_engine_vpu TARGETS ${TARGET_NAME})
|
||||
|
||||
target_link_libraries(${TARGET_NAME} PUBLIC ngraph inference_engine_transformations
|
||||
PRIVATE mvnc openvino::itt)
|
||||
PRIVATE openvino::itt)
|
||||
|
||||
if(NOT STATIC_IE)
|
||||
target_link_libraries(${TARGET_NAME} PUBLIC inference_engine_legacy)
|
||||
|
@ -27,6 +27,7 @@ VERIFIED_OP_REFERENCES = [
|
||||
'Cosh-1',
|
||||
'DeformableConvolution-1',
|
||||
'DeformablePSROIPooling-1',
|
||||
'DepthToSpace-1',
|
||||
'DetectionOutput-1',
|
||||
'Divide-1',
|
||||
'ExperimentalDetectronDetectionOutput-6',
|
||||
@ -92,6 +93,7 @@ VERIFIED_OP_REFERENCES = [
|
||||
'ScatterNDUpdate-4',
|
||||
'ShapeOf-1',
|
||||
'ShapeOf-3',
|
||||
'ShuffleChannels-1',
|
||||
'Sigmoid-1',
|
||||
'Sin-1',
|
||||
'Sinh-1'
|
||||
|
@ -0,0 +1,328 @@
|
||||
// Copyright (C) 2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "transformations/split_convolution_with_large_buffer_size.hpp"
|
||||
|
||||
#include "common_test_utils/ngraph_test_utils.hpp"
|
||||
#include <ngraph/function.hpp>
|
||||
#include <ngraph/opsets/opset7.hpp>
|
||||
#include <ngraph/pass/manager.hpp>
|
||||
#include <transformations/init_node_info.hpp>
|
||||
|
||||
namespace testing {
|
||||
namespace {
|
||||
|
||||
struct Graph {
|
||||
std::shared_ptr<ngraph::Function> createFunction();
|
||||
|
||||
std::shared_ptr<ngraph::opset7::Parameter> input_params;
|
||||
ngraph::OutputVector output_nodes;
|
||||
};
|
||||
|
||||
std::shared_ptr<ngraph::Function> Graph::createFunction() {
|
||||
auto result = std::make_shared<ngraph::opset7::Result>(output_nodes.front());
|
||||
return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
|
||||
ngraph::ParameterVector{input_params});
|
||||
}
|
||||
|
||||
// TODO: use std::make_unique when C++14 will be available
|
||||
template <typename T, typename... Args>
|
||||
std::unique_ptr<T> createUnique(Args&&... args) {
|
||||
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
|
||||
}
|
||||
|
||||
class CreateGraphDecorator {
|
||||
public:
|
||||
CreateGraphDecorator(std::unique_ptr<CreateGraphDecorator> prev = nullptr) : prev_(std::move(prev)) {}
|
||||
virtual ~CreateGraphDecorator() = default;
|
||||
virtual Graph build() {
|
||||
Graph graph;
|
||||
if (prev_)
|
||||
graph = prev_->build();
|
||||
updateGraph(graph);
|
||||
return graph;
|
||||
}
|
||||
protected:
|
||||
virtual void updateGraph(Graph& graph) = 0;
|
||||
private:
|
||||
CreateGraphDecorator(const CreateGraphDecorator&) = delete;
|
||||
CreateGraphDecorator& operator=(const CreateGraphDecorator&) = delete;
|
||||
private:
|
||||
std::unique_ptr<CreateGraphDecorator> prev_;
|
||||
};
|
||||
|
||||
using CreateGraphDecoratorPtr = std::unique_ptr<CreateGraphDecorator>;
|
||||
|
||||
class CreateAppendableGraphDecorator : public CreateGraphDecorator {
|
||||
public:
|
||||
CreateAppendableGraphDecorator(std::unique_ptr<CreateGraphDecorator> prev = nullptr) :
|
||||
CreateGraphDecorator(std::move(prev)) {}
|
||||
protected:
|
||||
void updateGraph(Graph& graph) override {
|
||||
ngraph::OutputVector new_graph_output;
|
||||
for (auto&& node : graph.output_nodes) {
|
||||
new_graph_output.emplace_back(createOutputNode(node));
|
||||
}
|
||||
|
||||
if (graph.output_nodes.empty())
|
||||
new_graph_output.emplace_back(createOutputNode(graph.input_params));
|
||||
|
||||
graph.output_nodes.swap(new_graph_output);
|
||||
}
|
||||
virtual ngraph::Output<ngraph::Node> createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) = 0;
|
||||
};
|
||||
|
||||
class CreateBaseDecorator : public CreateGraphDecorator {
|
||||
public:
|
||||
// always the first decorator => no prev_builder
|
||||
CreateBaseDecorator(const ngraph::Shape& input_data_shape = ngraph::Shape{1, 64, 4096, 4096}) :
|
||||
CreateGraphDecorator(nullptr),
|
||||
input_data_shape_(input_data_shape) {}
|
||||
protected:
|
||||
Graph build() override;
|
||||
void updateGraph(Graph& graph) override {}
|
||||
private:
|
||||
const ngraph::Shape input_data_shape_;
|
||||
};
|
||||
|
||||
using CreateBaseDecoratorPtr = std::unique_ptr<CreateBaseDecorator>;
|
||||
|
||||
Graph CreateBaseDecorator::build() {
|
||||
Graph graph;
|
||||
graph.input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32,
|
||||
input_data_shape_);
|
||||
return graph;
|
||||
}
|
||||
|
||||
class CreateConvolution : public CreateAppendableGraphDecorator {
|
||||
public:
|
||||
CreateConvolution(CreateGraphDecoratorPtr prev, const ngraph::Shape& kernel_shape = ngraph::Shape{1, 64, 1, 1}) :
|
||||
CreateAppendableGraphDecorator(std::move(prev)),
|
||||
kernel_shape_(kernel_shape) {}
|
||||
protected:
|
||||
ngraph::Output<ngraph::Node> createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) override;
|
||||
private:
|
||||
const ngraph::Shape kernel_shape_;
|
||||
};
|
||||
|
||||
ngraph::Output<ngraph::Node> CreateConvolution::createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) {
|
||||
auto kernel = ngraph::opset7::Constant::create(ngraph::element::f32,
|
||||
kernel_shape_, {1});
|
||||
|
||||
return std::make_shared<ngraph::opset7::Convolution>(parent_node,
|
||||
kernel,
|
||||
ngraph::Strides{1, 1},
|
||||
ngraph::CoordinateDiff{0, 0},
|
||||
ngraph::CoordinateDiff{0, 0},
|
||||
ngraph::Strides{1, 1});
|
||||
}
|
||||
|
||||
// should be used only after CreateBaseDecorator
|
||||
class CreateSplittedConvolution : public CreateGraphDecorator {
|
||||
public:
|
||||
CreateSplittedConvolution(CreateGraphDecoratorPtr prev,
|
||||
const ngraph::Shape& kernel_shape = ngraph::Shape{1, 64, 1, 1},
|
||||
const ngraph::Shape& split_shape = ngraph::Shape{960, 960, 960, 960, 256}) :
|
||||
CreateGraphDecorator(std::move(prev)),
|
||||
kernel_shape_(kernel_shape),
|
||||
split_shape_(split_shape) {}
|
||||
protected:
|
||||
void updateGraph(Graph& graph) override;
|
||||
private:
|
||||
const ngraph::Shape kernel_shape_;
|
||||
const ngraph::Shape split_shape_;
|
||||
};
|
||||
|
||||
void CreateSplittedConvolution::updateGraph(Graph& graph) {
|
||||
auto split_node_c1 = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({1}), std::vector<int64_t>{3});
|
||||
auto split_node_c2 = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({split_shape_.size()}), split_shape_);
|
||||
auto split_node = std::make_shared<ngraph::opset7::VariadicSplit>(graph.input_params,
|
||||
split_node_c1,
|
||||
split_node_c2);
|
||||
|
||||
auto kernel = ngraph::opset7::Constant::create(ngraph::element::f32,
|
||||
kernel_shape_, {1});
|
||||
|
||||
for (int i = 0; i < split_shape_.size(); ++i) {
|
||||
auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(split_node->output(i),
|
||||
kernel,
|
||||
ngraph::Strides{1, 1},
|
||||
ngraph::CoordinateDiff{0, 0},
|
||||
ngraph::CoordinateDiff{0, 0},
|
||||
ngraph::Strides{1, 1});
|
||||
graph.output_nodes.push_back(convolution_operation);
|
||||
}
|
||||
}
|
||||
|
||||
class CreateAdd : public CreateAppendableGraphDecorator {
|
||||
public:
|
||||
CreateAdd(CreateGraphDecoratorPtr prev) :
|
||||
CreateAppendableGraphDecorator(std::move(prev)) {}
|
||||
protected:
|
||||
ngraph::Output<ngraph::Node> createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) override;
|
||||
};
|
||||
|
||||
ngraph::Output<ngraph::Node> CreateAdd::createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) {
|
||||
auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
|
||||
return std::make_shared<ngraph::opset7::Add>(parent_node, bias);
|
||||
}
|
||||
|
||||
class CreateFakeQuantize : public CreateAppendableGraphDecorator {
|
||||
public:
|
||||
CreateFakeQuantize(CreateGraphDecoratorPtr prev) :
|
||||
CreateAppendableGraphDecorator(std::move(prev)) {}
|
||||
protected:
|
||||
ngraph::Output<ngraph::Node> createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) override;
|
||||
};
|
||||
|
||||
// Appends a FakeQuantize node with fixed input range [1, 20], output range
// [0, 10] and 11 quantization levels.
ngraph::Output<ngraph::Node> CreateFakeQuantize::createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) {
    // Helper producing a one-element f32 constant for a quantization boundary.
    auto boundary = [](float value) {
        return ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {value});
    };
    return std::make_shared<ngraph::opset7::FakeQuantize>(parent_node,
                                                          boundary(1),   // input_low
                                                          boundary(20),  // input_high
                                                          boundary(0),   // output_low
                                                          boundary(10),  // output_high
                                                          11);           // levels
}
|
||||
|
||||
class CreateConcat : public CreateGraphDecorator {
|
||||
public:
|
||||
CreateConcat(CreateGraphDecoratorPtr prev) :
|
||||
CreateGraphDecorator(std::move(prev)) {}
|
||||
protected:
|
||||
void updateGraph(Graph& graph) override;
|
||||
};
|
||||
|
||||
// Collapses all current outputs of the graph into one Concat node over axis 3,
// which becomes the graph's single output.
void CreateConcat::updateGraph(Graph& graph) {
    auto concat_node = std::make_shared<ngraph::opset7::Concat>(graph.output_nodes, 3);
    graph.output_nodes = ngraph::OutputVector{concat_node};
}
|
||||
|
||||
// -------------------------------------------------------------------------------------------------------
|
||||
|
||||
template<typename DecorT, typename... DecorTs, typename std::enable_if<(sizeof...(DecorTs) == 0), bool>::type = true>
|
||||
CreateGraphDecoratorPtr createBuildDecorator() {
|
||||
CreateGraphDecoratorPtr build_decorator = createUnique<CreateBaseDecorator>();
|
||||
return createUnique<DecorT>(std::move(build_decorator));
|
||||
}
|
||||
|
||||
template<typename DecorT, typename... DecorTs, typename std::enable_if<(sizeof...(DecorTs) > 0), bool>::type = true>
|
||||
CreateGraphDecoratorPtr createBuildDecorator() {
|
||||
CreateGraphDecoratorPtr build_decorator = createBuildDecorator<DecorTs...>();
|
||||
return createUnique<DecorT>(std::move(build_decorator));
|
||||
}
|
||||
|
||||
template<typename DecorT, typename... DecorTs>
|
||||
Graph createGraph() {
|
||||
CreateGraphDecoratorPtr build_decorator = createBuildDecorator<DecorT, DecorTs...>();
|
||||
return build_decorator->build();
|
||||
}
|
||||
|
||||
// Non-template overload: base decorator with an explicit input shape,
// wrapped by a convolution with the given kernel shape.
CreateGraphDecoratorPtr createBuildDecorator(const ngraph::Shape& input_shape, const ngraph::Shape& kernel_shape) {
    return createUnique<CreateConvolution>(createUnique<CreateBaseDecorator>(input_shape), kernel_shape);
}
|
||||
|
||||
template<typename DecorT, typename... DecorTs, typename std::enable_if<(sizeof...(DecorTs) == 0), bool>::type = true>
|
||||
CreateGraphDecoratorPtr createBuildDecorator(const ngraph::Shape& input_shape,
|
||||
const ngraph::Shape& kernel_shape) {
|
||||
CreateGraphDecoratorPtr build_decorator = createBuildDecorator(input_shape, kernel_shape);
|
||||
return createUnique<DecorT>(std::move(build_decorator));
|
||||
}
|
||||
|
||||
template<typename DecorT, typename... DecorTs, typename std::enable_if<(sizeof...(DecorTs) > 0), bool>::type = true>
|
||||
CreateGraphDecoratorPtr createBuildDecorator(const ngraph::Shape& input_shape,
|
||||
const ngraph::Shape& kernel_shape) {
|
||||
CreateGraphDecoratorPtr build_decorator = createBuildDecorator<DecorTs...>(input_shape, kernel_shape);
|
||||
return createUnique<DecorT>(std::move(build_decorator));
|
||||
}
|
||||
|
||||
// Builds a minimal graph (base input + convolution) with explicit shapes.
Graph createSolidGraph(const ngraph::Shape& input_shape, const ngraph::Shape& kernel_shape) {
    return createBuildDecorator(input_shape, kernel_shape)->build();
}
|
||||
|
||||
template<typename DecorT, typename... DecorTs>
|
||||
Graph createSolidGraph(const ngraph::Shape& input_shape, const ngraph::Shape& kernel_shape) {
|
||||
CreateGraphDecoratorPtr build_decorator = createBuildDecorator<DecorT, DecorTs...>(input_shape, kernel_shape);
|
||||
return build_decorator->build();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------------------------------------
|
||||
|
||||
class SplitConvolutionFixture: public CommonTestUtils::TestsCommon,
|
||||
public ::testing::WithParamInterface<std::tuple<Graph /* tranformed */,
|
||||
Graph /* reference */,
|
||||
ngraph::pass::Manager>> {
|
||||
public:
|
||||
void SetUp() override;
|
||||
public:
|
||||
std::shared_ptr<ngraph::Function> function, reference_function;
|
||||
ngraph::pass::Manager pass_manager;
|
||||
};
|
||||
|
||||
void SplitConvolutionFixture::SetUp() {
|
||||
// TODO: use auto & [transformed_graph, reference_graph] = this->GetParam() when C++17
|
||||
Graph transformed_graph;
|
||||
Graph reference_graph;
|
||||
std::tie(transformed_graph, reference_graph, pass_manager) = this->GetParam();
|
||||
|
||||
function = transformed_graph.createFunction();
|
||||
reference_function = reference_graph.createFunction();
|
||||
}
|
||||
|
||||
// Runs pass_manager over `function`, then asserts the result matches
// `reference_function` (attributes included in the comparison).
void execute_test(std::shared_ptr<ngraph::Function> function,
                  std::shared_ptr<ngraph::Function> reference_function,
                  ngraph::pass::Manager& pass_manager) {
    pass_manager.run_passes(function);

    const auto comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES);
    const auto comparison_result = comparator(function, reference_function);
    ASSERT_TRUE(comparison_result.valid);
}
|
||||
|
||||
template <typename TransformationT>
|
||||
ngraph::pass::Manager createPassManager() {
|
||||
ngraph::pass::Manager manager;
|
||||
manager.register_pass<ngraph::pass::InitNodeInfo>();
|
||||
manager.register_pass<TransformationT>();
|
||||
return manager;
|
||||
}
|
||||
|
||||
// Applies the parameterized pass manager to the transformed function and
// checks it becomes equivalent to the reference function.
TEST_P(SplitConvolutionFixture, CompareFunctions) {
    execute_test(function, reference_function, pass_manager);
}
|
||||
|
||||
// Each tuple is (graph fed to the pass, expected graph after the pass,
// pass manager holding the transformation under test). The createSolidGraph
// cases use 1x1x1x1 shapes and expect the graph to be left unchanged
// (transformed == reference).
INSTANTIATE_TEST_SUITE_P(SplitConvolutionTestSuite, SplitConvolutionFixture,
                         ::testing::Values(std::make_tuple(createGraph<CreateConvolution>(),
                                                           createGraph<CreateConcat, CreateSplittedConvolution>(),
                                                           createPassManager<GNAPluginNS::SplitConvolution>()),
                                           std::make_tuple(createGraph<CreateAdd, CreateConvolution>(),
                                                           createGraph<CreateConcat, CreateAdd, CreateSplittedConvolution>(),
                                                           createPassManager<GNAPluginNS::SplitConvolutionWithBias>()),
                                           std::make_tuple(createGraph<CreateFakeQuantize, CreateConvolution>(),
                                                           createGraph<CreateConcat, CreateFakeQuantize, CreateSplittedConvolution>(),
                                                           createPassManager<GNAPluginNS::SplitConvolutionWithFq>()),
                                           std::make_tuple(createGraph<CreateFakeQuantize, CreateAdd, CreateConvolution>(),
                                                           createGraph<CreateConcat, CreateFakeQuantize, CreateAdd, CreateSplittedConvolution>(),
                                                           createPassManager<GNAPluginNS::SplitConvolutionWithFq>()),
                                           std::make_tuple(createSolidGraph(ngraph::Shape{1, 1, 1, 1}, ngraph::Shape{1, 1, 1, 1}),
                                                           createSolidGraph(ngraph::Shape{1, 1, 1, 1}, ngraph::Shape{1, 1, 1, 1}),
                                                           createPassManager<GNAPluginNS::SplitConvolution>()),
                                           std::make_tuple(createSolidGraph<CreateAdd>(ngraph::Shape{1, 1, 1, 1}, ngraph::Shape{1, 1, 1, 1}),
                                                           createSolidGraph<CreateAdd>(ngraph::Shape{1, 1, 1, 1}, ngraph::Shape{1, 1, 1, 1}),
                                                           createPassManager<GNAPluginNS::SplitConvolutionWithBias>()),
                                           std::make_tuple(createSolidGraph<CreateFakeQuantize>(ngraph::Shape{1, 1, 1, 1}, ngraph::Shape{1, 1, 1, 1}),
                                                           createSolidGraph<CreateFakeQuantize>(ngraph::Shape{1, 1, 1, 1}, ngraph::Shape{1, 1, 1, 1}),
                                                           createPassManager<GNAPluginNS::SplitConvolutionWithFq>()),
                                           std::make_tuple(createSolidGraph<CreateAdd, CreateFakeQuantize>(ngraph::Shape{1, 1, 1, 1},
                                                                                                           ngraph::Shape{1, 1, 1, 1}),
                                                           createSolidGraph<CreateAdd, CreateFakeQuantize>(ngraph::Shape{1, 1, 1, 1},
                                                                                                           ngraph::Shape{1, 1, 1, 1}),
                                                           createPassManager<GNAPluginNS::SplitConvolutionWithFq>())));
|
||||
|
||||
} // namespace
|
||||
} // namespace testing
|
@ -2,11 +2,10 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "cldnn/runtime/engine.hpp"
|
||||
#include "cldnn/primitives/implementation_desc.hpp"
|
||||
|
||||
#include "topology.hpp"
|
||||
|
||||
@ -99,14 +98,6 @@ struct learning_params {
|
||||
learning_params() : momentum(0.9f), weights_decay(0.0005f) {}
|
||||
};
|
||||
|
||||
/// @brief Description of primitives implementation.
/// Value type of implementation_forcing_map (keyed by primitive_id).
struct implementation_desc {
    format::type output_format;  ///< Output format.
    std::string kernel_name;  ///< GPU kernel name.
};
|
||||
|
||||
/// @brief Per-primitive map of implementation descriptions, keyed by primitive_id.
using implementation_forcing_map = std::map<primitive_id, implementation_desc>;
|
||||
|
||||
/// @brief Represents user-provided program build option.
|
||||
struct build_option {
|
||||
/// @brief Allow primitives fusing during program build (default: false).
|
||||
|
70
inference-engine/thirdparty/clDNN/api/cldnn/primitives/implementation_desc.hpp
vendored
Normal file
70
inference-engine/thirdparty/clDNN/api/cldnn/primitives/implementation_desc.hpp
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
// Copyright (C) 2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "cldnn/runtime/tensor.hpp"
|
||||
|
||||
#include <map>
|
||||
#include <ostream>
|
||||
|
||||
namespace cldnn {
|
||||
|
||||
/// @brief Primitives implementation type.
/// Values are distinct bit flags so they can be combined and masked with the
/// overloaded bitwise operators declared below; `any` has all bits set and so
/// matches every implementation type under operator&.
enum class impl_types : uint8_t {
    cpu = 1 << 0,
    common = 1 << 1,
    ocl = 1 << 2,
    any = 0xFF,
};
|
||||
|
||||
/// @brief Bitwise AND over the flag bits of two implementation types
///        (e.g. masking a requested type against impl_types::any).
inline impl_types operator&(impl_types a, impl_types b) {
    // Alias declaration instead of typedef (modernize-use-using).
    using underlying_type = std::underlying_type<impl_types>::type;
    return static_cast<impl_types>(static_cast<underlying_type>(a) & static_cast<underlying_type>(b));
}
|
||||
|
||||
/// @brief Bitwise OR over the flag bits of two implementation types.
inline impl_types operator|(impl_types a, impl_types b) {
    // Alias declaration instead of typedef (modernize-use-using).
    using underlying_type = std::underlying_type<impl_types>::type;
    return static_cast<impl_types>(static_cast<underlying_type>(a) | static_cast<underlying_type>(b));
}
|
||||
|
||||
/// @brief Bitwise complement of an implementation type's flag bits.
inline impl_types operator~(impl_types a) {
    // Alias declaration instead of typedef (modernize-use-using).
    using underlying_type = std::underlying_type<impl_types>::type;
    return static_cast<impl_types>(~static_cast<underlying_type>(a));
}
|
||||
|
||||
inline std::ostream& operator<<(std::ostream& out, const impl_types& impl_type) {
|
||||
switch (impl_type) {
|
||||
case impl_types::cpu: out << "cpu"; break;
|
||||
case impl_types::common: out << "common"; break;
|
||||
case impl_types::ocl: out << "ocl"; break;
|
||||
case impl_types::any: out << "any"; break;
|
||||
default: out << "unknown"; break;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
/// @brief Description of primitives implementation.
|
||||
struct implementation_desc {
|
||||
format::type output_format; ///< Output format.
|
||||
std::string kernel_name; ///< GPU kernel name.
|
||||
impl_types impl_type; ///< GPU implementation type.
|
||||
|
||||
implementation_desc() :
|
||||
output_format(format::any),
|
||||
kernel_name(""),
|
||||
impl_type(impl_types::any) {}
|
||||
|
||||
implementation_desc(format::type output_format,
|
||||
std::string kernel_name,
|
||||
impl_types impl_type = impl_types::any) :
|
||||
output_format(output_format),
|
||||
kernel_name(kernel_name),
|
||||
impl_type(impl_type) {}
|
||||
};
|
||||
|
||||
/// @brief Per-primitive map of implementation descriptions, keyed by primitive_id.
using implementation_forcing_map = std::map<primitive_id, implementation_desc>;
|
||||
|
||||
} // namespace cldnn
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user