Compare commits

...

50 Commits

Author SHA1 Message Date
Tomasz Dołbniak
304c37216a Clone a specific tag for pybind11 (#2297) 2020-09-16 23:04:47 +03:00
Nikolay Shchegolev
5e4377b250 [CPU] RNN layer. Blobs precision validation. (#2223) 2020-09-16 18:19:14 +03:00
Nikolay Shchegolev
1e5edf86a1 Statically analyzed issues. (#2274) 2020-09-16 18:09:59 +03:00
Irina Efode
12abb2eb49 [IE TESTS] CoreThreading_LoadNetwork tests were disabled for GPU plugin (#2245) 2020-09-16 16:46:02 +03:00
Piotr Szmelczynski
1337997134 Test refactor (#2225) 2020-09-16 15:33:02 +02:00
Alexey Suhov
fe49d5743b update OpenCV version to 4.5.0 (#2254)
* update OpenCV version to 4.5.0

* fix Azure pipelines
2020-09-16 16:13:41 +03:00
Ilya Churaev
ce9c171f46 Fixed KW warning and review issues (#2263) 2020-09-16 15:33:23 +03:00
Gorokhov Dmitriy
83e96891ca Revert "[IE TESTS] dynavic batch for mvn layer (#1010)" (#2257)
This reverts commit 2e3378c50f.
2020-09-16 14:11:48 +03:00
Evgeny Latkin
339cd5e49e [IE][VPU]: update firmware 1378 (#2182) 2020-09-16 13:51:33 +03:00
Andrew Bakalin
18521f2dcb [VPU] Fix K propagation through Reshape (#2184)
* [VPU][DTS] Fix K propagation through Reshape

* [VPU] Add test cases
2020-09-16 12:41:49 +03:00
Anton Potapov
d590144545 [PP GAPI] Added tests to cover existing precision conversions done by (#1976)
some plugins

- added shared parameterized tests
- instantiated for template plugin
- instantiated for cpu plugin
- fixed CPU plugin to properly handle U16 input
- fixed CPU reverse_sequence primitive to allow input/output tensors to
be in FP32 only
- updated ngraph test_simple_computation_on_ndarrays to not expect
failure on U16 input
2020-09-16 12:41:14 +03:00
Svetlana Dolinina
cdedc4af19 added check to avoid IR generation in case of wrong input shape (#2127)
* added check to avoid IR generation in case of wrong input shape

* review changes
2020-09-16 11:29:05 +03:00
Maxim Kurin
cb70ae064b [IE][VPU][OpenCL] 2021.1 release compiler (#2166) 2020-09-16 00:44:25 +03:00
Evgeny Talanin
83238b23db Revert "Eliminated invalid subgraphs (#2196)" (#2250)
This reverts commit 89a6f926a4.
2020-09-15 19:24:57 +03:00
Konstantin Satunin
25ba600db1 Added Public CI badge to main page (#2241) 2020-09-15 17:39:32 +03:00
Anna Alberska
3ecee2ce49 [GNA] fix scale factor calculation for unfused bias after fc (#2097)
* [GNA] fix scale factor calculation for unfused bias after fc

* change check

* add test

* apply requested changes

* cpplint fix

* apply test changes

* modify model for test to match ::op::
2020-09-15 16:04:06 +03:00
Mateusz Bencer
e55653b519 Fix running nG Python tests on Windows and update doc (#2162) 2020-09-15 15:02:48 +02:00
Gladilov, Gleb
6e0611566c [IE][VPU]: Merges transformations calls from LoadExeNetwork and Frontend (#2220)
Signed-off-by: Gladilov, Gleb <gleb.gladilov@intel.com>
2020-09-15 15:28:28 +03:00
Ilya Lavrenov
9ca5fbaf02 Reshape v7: remove (#1379)
* Removed shape inference for IR v7 and older

* Disabled dynamic batch tests which require reshape

* Fixes tests 2

* Disabled MKLDNN tests with convolution reshape

* Fixed GPU tests

* Disable VPU tests with batch size > 1 for old IRs

* Removed most of shape infer functions for old representation

* Removed most of CNNLayer validators

* Fixed validators and keep only parseParams

* Removed tests on invalid IR v7

* Disabled more VPU tests

* Removed Backetize validator

* Disable one more Myriad tests case where reshape for old IR is needed

* Removed useless reshape

* Need to replace GRUCell with Unique

* Moved shape infer functions for experimental layers to Core IE

* Fixed shape inference functions not to depend on legacy

* Added missed SparseToDense

* Added descriptive error message

* Fixed comments
2020-09-15 15:08:17 +03:00
Roman Vyunov (Intel)
9e8b42ff95 [IE][VPU]: Workaround to support parameter Beta for layer Swish (#2205)
* Workaround to fully support the Swish layer. It is faster than native Swish for now.
2020-09-15 14:39:27 +03:00
Vladislav Vinogradov
a0938a92d4 [IE][TESTS] Fix compareRawBuffers and compareBlobData methods (#2222)
Use `<=` comparison instead of `<` with thresholds.
This allows to use `0` threshold for bit-exact comparison.
2020-09-15 14:04:47 +03:00
Anton Pankratv
89a6f926a4 Eliminated invalid subgraphs (#2196) 2020-09-15 14:03:24 +03:00
Ilya Churaev
e3174fa752 nGraph some KW fixes (#2102)
* Removed redundant methods

* Fixed KW for linux
2020-09-15 14:03:07 +03:00
Ilya Churaev
3cecc3ffbe Extend error message (#2175) 2020-09-15 14:02:42 +03:00
Ilya Churaev
1bae5504ca Fixed query network for networks with KSO (#2201)
* Added a test to reproduce QueryNetwork with KSO

* Fixed QueryNetwork for networks with KSO

* Added additional test
2020-09-15 14:02:15 +03:00
Ilya Churaev
baac903cdc Fixed output names for case with redundant ops before result (#2210) 2020-09-15 14:01:43 +03:00
Maksim Doronin
27c03b35be [IE][VPU]: Some KW fixes (#2142)
* Some KW fixes
* Fix printTo in vpu ngraph transformations
2020-09-15 12:42:16 +03:00
Denis Orlov
eea5acaacc [GNA] Safety fixes (#2158) 2020-09-15 11:24:17 +03:00
Gladilov, Gleb
e4f0d8053a [IE][VPU][Tests]: Enables tests on MergeSubsequentDSROperations (#2149)
Signed-off-by: Gladilov, Gleb <gleb.gladilov@intel.com>
2020-09-15 10:44:25 +03:00
Ivan Tikhonov
cd722d72df TensorIterator to RNN/GRU/LSTM Sequence transformation (#2146)
* ti to sequences transformations

* fix sequences to sequences ie conversion

* resolve review marks

* resolve review remarks, fix ti to sequences transformations to support batch > 1 if slice axis == 0

* temporary enable ngraph ti transformations for cpu plugin

* fix includes

* Revert "fix includes"

This reverts commit 6cf15b97be.

* Revert "temporary enable ngraph ti transformations for cpu plugin"

This reverts commit fd528d7216.

* delete todo comments
2020-09-15 10:11:51 +03:00
Edward Shogulin
ac2370b420 [LPT] Copy constant with several outputs before blob update (cherry-pick to master) (#2198)
* [LPT] Copy constant implementation

* [LPT] the same Constant ops as FQ interval boundaries
2020-09-15 09:18:58 +03:00
Artyom Anokhov
ff3c5fce99 setupvars: update logic for setting path-like vars in case if they are empty. Updated copyrights. (#2229) 2020-09-14 19:49:57 +03:00
Svetlana Dolinina
43d6bf045b GatherTree description was extended and outdated link fixed (#2167)
* add more clarifications to description

* move clarification to comment

* pseudo code become more accurate

* review changes
2020-09-14 19:49:29 +03:00
Alexey Suhov
5d59a112d7 Revert "update OpenCV version to 4.5.0"
This reverts commit 5d229a4564.
2020-09-14 18:47:19 +03:00
Alexey Suhov
5d229a4564 update OpenCV version to 4.5.0 2020-09-14 17:58:39 +03:00
Maxim Andronov
8cf84f58e7 [CPU] add check on equality of values IL and IH ranges (#2190) 2020-09-14 15:28:37 +03:00
Rafal Blaczkowski
706d2fe7c8 Add backend tests to ONNX OpenVino CI (#1972) 2020-09-14 13:07:47 +02:00
Andrey Somsikov
6d2424ddcf Define security policy (#2215) 2020-09-14 13:50:03 +03:00
Anastasia Kuporosova
f7ee106b21 [Python Tools] Fix several problems in cross-check-tool (#2170) 2020-09-14 13:49:01 +03:00
Evgenya Stepyreva
75601e62ed Super smart reshape: HC Reshape to 2D followed by MatMul (#2183)
* Initial commit

* [SSR] Reshape(2D)->MatMul constrain relaxation

* Moved common pattern mechanics to the common function

* Moving SmartReshape to CNNNetworkNgraphImpl ctors

* Review comment

* Tests
2020-09-14 13:45:27 +03:00
Andrey Somsikov
f84a6d97ac Split time-tests common library (#2173)
Split time-tests common library

Add a README.md with workflow description.
Defined "timetest_" suffix for all time tests.
Applied clang-format-9 and added a README.md

Co-authored-by: Alina Alborova <alina.alborova@intel.com>
2020-09-14 09:04:49 +03:00
Evgenya Stepyreva
1007b05104 [DOC] Reshape feature (#2023)
* [DOC] Reshape feature

* Comments addressed.

* Clarifications

* Converting_Model_General.md

* Update ShapeInference.md

* Update ShapeInference.md

* Comments

* Update Convert_Object_Detection_API_Models.md

Co-authored-by: Andrey Zaytsev <andrey.zaytsev@intel.com>
2020-09-11 21:41:42 +03:00
Gleb Kazantaev
1f555149dd Added type info for transformations (#2116) 2020-09-11 15:38:43 +03:00
Anton Chetverikov
e6e7f5158a Fix Mish and SoftPlus value propagation functions (#2120)
* Fix Mish and SoftPlus value propagation functions

* Add unit tests for SoftPlus & Mish operations value propagation functions
2020-09-11 12:58:14 +03:00
Anastasia Kuporosova
1fd2df6e0d [PYTHON API TESTS] Add ngraph tests (#2132)
Co-authored-by: Alexander Zhogov <alexander.zhogov@intel.com>
2020-09-11 12:56:57 +03:00
Vladislav Volkov
ba254d7669 Fix for static PartialShape detection algorithm (#2106) 2020-09-11 06:15:24 +03:00
Ivan Tikhonov
a20d7ba384 Added callback to disable PriorBox to PriorBoxIE transformation (#2159)
* move priorbox to ie transformations to Opset1ToLegacyOpset pipeline

* fix typo

* Revert "fix typo"

This reverts commit 4077a78cbd.

* Revert "move priorbox to ie transformations to Opset1ToLegacyOpset pipeline"

This reverts commit 910e41ff20.

* add functionality to disable prior box to ie transformation

* fix callback
2020-09-11 06:12:14 +03:00
Sergey Shlyapnikov
f157319792 [IE CLDNN] Memory allocation optimizations (#2007) 2020-09-10 23:18:26 +03:00
Evgeny Talanin
e23f575847 Add exposing function signatures via Cython (#2024) 2020-09-10 19:58:45 +03:00
Vitaliy Urusovskij
a8f8ab8c12 Implement run_executable.py to run TimeTests several times (#2125) 2020-09-10 19:41:23 +03:00
490 changed files with 5825 additions and 10921 deletions

View File

@@ -79,5 +79,5 @@ ENV NGRAPH_CPP_BUILD_PATH=/openvino/dist
ENV LD_LIBRARY_PATH=/openvino/dist/lib
ENV NGRAPH_ONNX_IMPORT_ENABLE=TRUE
ENV PYTHONPATH=/openvino/bin/intel64/Release/lib/python_api/python3.8:${PYTHONPATH}
RUN git clone --recursive https://github.com/pybind/pybind11.git
RUN git clone --recursive https://github.com/pybind/pybind11.git -b v2.5.0 --depth 1
CMD tox

View File

@@ -1,6 +1,7 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2020.4-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2020.4.0)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
![Azure DevOps builds (branch)](https://img.shields.io/azure-devops/build/openvinoci/b2bab62f-ab2f-4871-a538-86ea1be7d20f/9/master?label=Public%20CI)
This toolkit allows developers to deploy pre-trained deep learning models
through a high-level C++ Inference Engine API integrated with application logic.

13
SECURITY.md Normal file
View File

@@ -0,0 +1,13 @@
# Security Policy
## Reporting a Vulnerability
Please report about security issues or vulnerabilities you find to Intel
[Security Center].
For more information on how Intel works to resolve security issues, see:
[Vulnerability Handling Guidelines]
[Security Center]: https://www.intel.com/security
[Vulnerability Handling Guidelines]: https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html

View File

@@ -336,14 +336,14 @@ jobs:
# Add for gtest-parallel, it hangs now (CVS-33386)
#python $(BUILD_DIR)\gtest-parallel\gtest-parallel $(BIN_DIR)\MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --print_test_times --dump_json_test_results=MklDnnFunctionalTests.json -- --gtest_print_time=1
- script: |
set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH%
set DATA_PATH=$(BUILD_DIR)\testdata
set MODELS_PATH=$(BUILD_DIR)\testdata
$(BIN_DIR)\MklDnnFunctionalTests --gtest_print_time=1
displayName: 'MklDnnFunctionalTests'
continueOnError: false
- script: |
set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH%
set DATA_PATH=$(BUILD_DIR)\testdata
set MODELS_PATH=$(BUILD_DIR)\testdata
$(BIN_DIR)\InferenceEngineCAPITests

View File

@@ -382,7 +382,7 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
6. Before running the samples, add paths to the TBB and OpenCV binaries used for
the build to the `%PATH%` environment variable. By default, TBB binaries are
downloaded by the CMake-based script to the `<openvino_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<openvino_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
folder, OpenCV binaries to the `<openvino_repo>/inference-engine/temp/opencv_4.5.0/opencv/bin`
folder.
### Additional Build Options

View File

@@ -1,38 +1,61 @@
Using Shape Inference {#openvino_docs_IE_DG_ShapeInference}
==========================================
Inference Engine takes two kinds of model description as an input: [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md) and [nGraph::Function](nGraph_Flow.md) objects.
Both should have fixed input shapes to be successfully loaded to the Inference Engine.
To feed input data of a shape that is different from the model input shape, resize the model first.
Inference Engine takes three kinds of a model description as an input, which are converted into an `InferenceEngine::CNNNetwork` object:
1. [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md) through `InferenceEngine::Core::ReadNetwork`
2. [ONNX model](../IE_DG/OnnxImporterTutorial.md) through `InferenceEngine::Core::ReadNetwork`
3. [nGraph::Function](../IE_DG/nGraph_Flow.md) through the constructor of `InferenceEngine::CNNNetwork`
Model resizing on the stage of <a href="_docs_MO_DG_prepare_model_convert_model_Converting_Model_General.html#when_to_specify_input_shapes">IR generation</a> or [nGraph::Function creation](nGraphTutorial.md) is the recommended approach.
OpenVINO™ provides the following experimental methods for runtime model reshaping:
`InferenceEngine::CNNNetwork` keeps an `ngraph::Function` object with the model description internally.
The object should have fully defined input shapes to be successfully loaded to the Inference Engine plugins.
To resolve undefined input dimensions of a model, call the `CNNNetwork::reshape` method providing new input shapes before loading to the Inference Engine plugin.
1. Setting a new input shape with the `InferenceEngine::CNNNetwork::reshape` method
`InferenceEngine::CNNNetwork::reshape` method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.
Shape propagation for `InferenceEngine::CNNNetwork` objects created from `nGraph::Function` or IR of the version 10 works through the `nGraph` shape inference mechanism.
`InferenceEngine::CNNNetwork` objects created from lower IR versions are considered deprecated and may be reshaped incorrectly or give unexpected results.
To keep the v10 IR resizable by the `InferenceEngine::CNNNetwork::reshape` method, convert the model with the additional Model Optimizer key `--keep_shape_ops`.
2. Setting a new batch dimension value with the `InferenceEngine::CNNNetwork::setBatchSize` method
The meaning of a model batch may vary depending on choices you made during the model designing.
The `InferenceEngine::CNNNetwork::setBatchSize` method deduces index of batch dimension relying only on the input rank.
This method does not work for models with a non-zero index batch placement or models with inputs without a batch dimension.
Run the following code right after `InferenceEngine::CNNNetwork` creation to explicitly check for model input names and shapes:
```cpp
CNNNetwork network = ... // read IR / ONNX model or create from nGraph::Function explicitly
const auto parameters = network.getFunction()->get_parameters();
for (const auto & parameter : parameters) {
std::cout << "name: " << parameter->get_friendly_name() << " shape: " << parameter->get_partial_shape() << std::endl;
if (parameter->get_partial_shape().is_dynamic())
std::cout << "ATTENTION: Input shape is not fully defined. Use the CNNNetwork::reshape method to resolve it." << std::endl;
}
```
Batch-setting algorithm does not involve shape inference mechanism.
Batch of input and output shapes for all layers is set to a new batch value without layer validation.
It may cause both positive and negative side effects.
Due to the limitations described above, the current method is recommended for simple image processing models only.
To feed input data of a shape that is different from the model input shape, reshape the model first.
OpenVINO™ provides the following methods for runtime model reshaping:
Practically, some models are not ready to be resized. In this case, a new input shape cannot be set with the Model Optimizer or the `InferenceEngine::CNNNetwork::reshape` method.
* **Set a new input shape** with the `InferenceEngine::CNNNetwork::reshape` method.<br>
The `InferenceEngine::CNNNetwork::reshape` method updates input shapes and propagates them down to the outputs of the model through all intermediate layers.
You can reshape a model multiple times like in this application scheme:
```
ReadNetwork -> reshape(input_1_shape) -> LoadNetwork -> infer(input_1)
\
-> reshape(input_2_shape) -> LoadNetwork -> infer(input_2)
```
> **NOTES**:
> - Starting with the 2021.1 release, the Model Optimizer converts topologies keeping shape-calculating sub-graphs by default, which enables correct shape propagation during reshaping.
> - Older versions of IRs are not guaranteed to reshape successfully. Please regenerate them with the Model Optimizer of the latest version of OpenVINO™.<br>
> - If an ONNX model does not have a fully defined input shape and the model was imported with the ONNX importer, reshape the model before loading it to the plugin.
* **Set a new batch dimension value** with the `InferenceEngine::CNNNetwork::setBatchSize` method.<br>
The meaning of a model batch may vary depending on the model design.
The `InferenceEngine::CNNNetwork::setBatchSize` method deduces the index of a batch dimension based only on the input rank.
This method does not work for models with a non-zero index batch placement or models with inputs without a batch dimension.
The batch-setting algorithm does not involve the shape inference mechanism.
Batch of input and output shapes for all layers is set to a new batch value without layer validation.
It may cause both positive and negative side effects.
Due to the limitations described above, the current method is not recommended to use.
If you need to set a new batch size for the model, use the `CNNNetwork::reshape` method instead.
## Troubleshooting Resize Errors
Do not use runtime reshaping methods simultaneously, especially do not call the `CNNNetwork::reshape` method after you use `InferenceEngine::CNNNetwork::setBatchSize`.
The `InferenceEngine::CNNNetwork::setBatchSize` method causes irreversible conversion of the internal model representation into the legacy model representation.
The method does not use nGraph for shape inference which leads to reduced reshape opportunities and may affect the performance of the model.
There are other approaches to reshape the model during the stage of <a href="_docs_MO_DG_prepare_model_convert_model_Converting_Model_General.html#when_to_specify_input_shapes">IR generation</a> or [nGraph::Function creation](../IE_DG/nGraphTutorial.md).
Practically, some models are not ready to be reshaped. In this case, a new input shape cannot be set with the Model Optimizer or the `InferenceEngine::CNNNetwork::reshape` method.
## Troubleshooting Reshape Errors
Operation semantics may impose restrictions on input shapes of the operation.
Shape collision during shape propagation may be a sign that a new shape does not satisfy the restrictions.
@@ -42,7 +65,7 @@ Examples of such operations:
- <a href="_docs_MO_DG_prepare_model_convert_model_IR_V10_opset1.html#Reshape">`Reshape` operation</a> with a hard-coded output shape value
- <a href="_docs_MO_DG_prepare_model_convert_model_IR_V10_opset1.html#MatMul">`MatMul` operation</a> with the `Const` second input cannot be resized by spatial dimensions due to operation semantics
Model structure and logic should not change significantly after resizing.
Model structure and logic should not change significantly after model reshaping.
- The Global Pooling operation is commonly used to reduce output feature map of classification models output.
Having the input of the shape [N, C, H, W], Global Pooling returns the output of the shape [N, C, 1, 1].
Model architects usually express Global Pooling with the help of the `Pooling` operation with the fixed kernel size [H, W].
@@ -50,12 +73,12 @@ During spatial reshape, having the input of the shape [N, C, H1, W1], Pooling wi
It breaks the classification model structure.
For example, [publicly available Inception family models from TensorFlow*](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models) have this issue.
- Resizing the model input shape may significantly affect its accuracy.
- Changing the model input shape may significantly affect its accuracy.
For example, Object Detection models from TensorFlow have resizing restrictions by design.
To keep the model valid after the reshape, choose a new input shape that satisfies conditions listed in the `pipeline.config` file.
For details, refer to the <a href="_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models.html#tf_od_custom_input_shape">Tensorflow Object Detection API models resizing techniques</a>.
## Usage of Reshape Method
## Usage of Reshape Method <a name="usage_of_reshape_method"></a>
The primary method of the feature is `InferenceEngine::CNNNetwork::reshape`.
It gets new input shapes and propagates them from input to output through all intermediate layers of the given network.

View File

@@ -126,8 +126,10 @@ Framework-agnostic parameters:
value, for example: "node_name->True". It will be
DEPRECATED in future releases. Use --input option to
specify a value for freezing.
--static_shape Enables `ShapeOf` operation with all children folding to `Constant`.
This option makes model not reshapable in Inference Engine
--static_shape Enables IR generation for fixed input shape (folding
`ShapeOf` operations and shape-calculating sub-graphs
to `Constant`). Changing model input shape using
the Inference Engine API in runtime may fail for such an IR.
--disable_weights_compression
Disable compression and store weights with original
precision.

View File

@@ -1,9 +1,7 @@
# Converting TensorFlow* Object Detection API Models {#openvino_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models}
> **NOTES**:
>
> * Starting with the 2019 R1 release, the Model Optimizer supports the `--keep_shape_ops` command line parameter that allows you to convert the TensorFlow\* Object Detection API Faster and Mask RCNNs topologies so they can be re-shaped in the Inference Engine using dedicated reshape API. Refer to [Using Shape Inference](../../../../IE_DG/ShapeInference.md) for more information on how to use this feature. It is possible to change the both spatial dimensions of the input image and batch size.
> * Starting with the 2018 R4 release, the Model Optimizer supports the `--input_shape` command line parameter for the TensorFlow\* Object Detection API topologies. Refer to the [Custom Input Shape](#tf_od_custom_input_shape) for more information.
> * Starting with the 2021.1 release, the Model Optimizer converts the TensorFlow\* Object Detection API SSDs, Faster and Mask RCNNs topologies keeping shape-calculating sub-graphs by default, so topologies can be re-shaped in the Inference Engine using dedicated reshape API. Refer to [Using Shape Inference](../../../../IE_DG/ShapeInference.md) for more information on how to use this feature. It is possible to change the both spatial dimensions of the input image and batch size.
> * To generate IRs for SSD topologies, the Model Optimizer creates a number of `PriorBoxClustered` layers instead of a constant node with prior boxes calculated for the particular input image size. This change allows you to reshape the topology in the Inference Engine using dedicated Inference Engine API. The reshaping is supported for all SSD topologies except FPNs which contain hardcoded shapes for some operations preventing from changing topology input shape.
## How to Convert a Model

View File

@@ -8,21 +8,33 @@
**Detailed description**
GatherTree operation implements the same algorithm as GatherTree operation in TensorFlow. Please see complete documentation [here](https://www.tensorflow.org/versions/r1.12/api_docs/python/tf/contrib/seq2seq/gather_tree?hl=en).
The GatherTree operation implements the same algorithm as the [GatherTree operation in TensorFlow](https://www.tensorflow.org/addons/api_docs/python/tfa/seq2seq/gather_tree).
Pseudo code:
```python
final_idx[ :, :, :] = end_token
for batch in range(BATCH_SIZE):
for beam in range(BEAM_WIDTH):
max_sequence_in_beam = min(MAX_TIME, max_seq_len[batch])
parent = parent_idx[max_sequence_in_beam - 1, batch, beam]
final_idx[max_sequence_in_beam - 1, batch, beam] = step_idx[max_sequence_in_beam - 1, batch, beam]
for level in reversed(range(max_sequence_in_beam - 1)):
final_idx[level, batch, beam] = step_idx[level, batch, parent]
parent = parent_idx[level, batch, parent]
# For a given beam, past the time step containing the first decoded end_token
# all values are filled in with end_token.
finished = False
for time in range(max_sequence_in_beam):
if(finished):
final_idx[time, batch, beam] = end_token
elif(final_idx[time, batch, beam] == end_token):
finished = True
```
Element data types for all input tensors should match each other.

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/preprocessing.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> inputPrecisions = {
InferenceEngine::Precision::U8,
InferenceEngine::Precision::FP32
};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTests, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName);
} // namespace

View File

@@ -181,9 +181,9 @@ endif ()
if (ENABLE_OPENCV)
reset_deps_cache(OpenCV_DIR)
set(OPENCV_VERSION "4.3.0")
set(OPENCV_BUILD "060")
set(OPENCV_BUILD_YOCTO "073")
set(OPENCV_VERSION "4.5.0")
set(OPENCV_BUILD "36")
set(OPENCV_BUILD_YOCTO "337")
if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
if(DEFINED ENV{THIRDPARTY_SERVER_PATH})

View File

@@ -19,8 +19,8 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
# Default packages
#
set(FIRMWARE_PACKAGE_VERSION 1370)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.0")
set(FIRMWARE_PACKAGE_VERSION 1378)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.1")
#
# CMake variables to override default firmware files
@@ -191,7 +191,7 @@ function(add_vpu_compile_custom_kernels)
"SHAVE_MA2X8XLIBS_DIR=${VPU_CLC_MA2X8X}/lib"
"SHAVE_MOVIASM_DIR=${VPU_CLC_MA2X8X}/bin"
"SHAVE_MYRIAD_LD_DIR=${VPU_CLC_MA2X8X}/bin"
${VPU_CLC_MA2X8X_COMMAND} --strip-binary-header ${cl_file} -o ${out_file}
${VPU_CLC_MA2X8X_COMMAND} --strip-binary-header -d ma2x8x ${cl_file} -o ${out_file}
MAIN_DEPENDENCY ${cl_file}
DEPENDS ${VPU_CLC_MA2X8X_COMMAND}
COMMENT "[VPU] Compile ${cl_file}"

View File

@@ -1,4 +1,5 @@
#distutils: language=c++
#cython: embedsignature=True
from cython.operator cimport dereference as deref
from libcpp.string cimport string
from libcpp.vector cimport vector

View File

@@ -1,23 +1,13 @@
from openvino.inference_engine import IECore, IENetwork
try:
import ngraph as ng
from ngraph.impl.op import Parameter
from ngraph.impl import Function, Shape, Type
ngraph_available=True
except:
ngraph_available=False
import pytest
import ngraph as ng
from ngraph.impl.op import Parameter
from ngraph.impl import Function, Shape, Type
from conftest import model_path
test_net_xml, test_net_bin = model_path()
if not ngraph_available:
pytest.skip("NGraph is not installed, skip", allow_module_level=True)
def test_create_IENetwork_from_nGraph():
element_type = Type.f32
@@ -58,3 +48,30 @@ def test_get_ops_from_IENetwork():
'28/Reshape/Cast_1955_const', '28/Reshape', 'onnx_initializer_node_17/Output_0/Data__const',
'29/WithoutBiases', 'onnx_initializer_node_18/Output_0/Data_/copy_const', '29', 'fc_out',
'fc_out/sink_port_0']
def test_get_type_name():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
func = ng.function_from_cnn(net)
ops = func.get_ordered_ops()
assert ops[2].get_type_name() == "Convolution"
def test_getting_shapes():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
func = ng.function_from_cnn(net)
ops = func.get_ordered_ops()
shapes = [sh for sh in ops[2].shape]
assert shapes == [1, 16, 32, 32]
def test_get_set_rt_info():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
func = ng.function_from_cnn(net)
ops = func.get_ordered_ops()
rt_info = ops[14].get_rt_info()
rt_info["affinity"] = "test_affinity"
assert ops[14].get_rt_info()["affinity"] == "test_affinity"

View File

@@ -309,7 +309,17 @@ class ScaleFactorPerLayer<InferenceEngine::EltwiseLayer*> {
case InferenceEngine::EltwiseLayer::Sub:
case InferenceEngine::EltwiseLayer::Sum: {
// detect which input will be used as biases
if (LayerInfo(in0).has32BOutput()) {
auto findPrevFunctional = [](InferenceEngine::CNNLayerPtr layer) {
auto prev = InferenceEngine::CNNNetPrevLayer(layer, 0);
while (CNNNetHasPrevLayer(prev.get(), 0) && LayerInfo(prev).isNonFunctional()) {
prev = InferenceEngine::CNNNetPrevLayer(prev, 0);
}
return prev;
};
if (LayerInfo(in0).has32BOutput() ||
(LayerInfo(in0).isNonFunctional() && CNNNetHasPrevLayer(in0.get(), 0) && LayerInfo(findPrevFunctional(in0)).has32BOutput())) {
std::swap(in0, in1);
std::swap(quantParams0, quantParams1);
}

View File

@@ -542,9 +542,11 @@ void GNAGraphCompiler::PowerPrimitive(InferenceEngine::CNNLayerPtr layer) {
connectInput(layer, ptr_inputs, num_data_bytes_in, 0, 0);
if (gnaFlags->sw_fp32) {
IE_ASSERT(quantized == nullptr);
gnamem->readonly().push_value(ptr_weights, power.scale, num_rows_out, 64);
gnamem->readonly().push_value(ptr_biases, power.offset, num_rows_out, 64);
} else {
IE_ASSERT(quantized != nullptr);
auto quantizedScale = FLOAT_TO_INT16(std::min(quantized->_weights_quant.scale * power.scale,
static_cast<float>(INT16_MAX)));
auto quantizedOffset = FLOAT_TO_INT32(std::min(quantized->_dst_quant.scale * power.offset,

View File

@@ -33,7 +33,7 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
protected:
std::string _pluginName = "GNA";
Config config;
Config config {};
std::shared_ptr<GNAPluginNS::backend::AMIntelDNN> dnn;
std::shared_ptr<GNAPluginNS::GNAFlags> gnaFlags;
std::shared_ptr<GNAPluginNS::gna_memory_type> gnamem;

View File

@@ -7,9 +7,9 @@
#include <cstdint>
typedef struct {
double slope;
double slope {};
uint64_t slope_scale = 0;
uint32_t slope_scale_index;
uint32_t slope_scale_index {};
} pwl_gna_slope_scale_t;
pwl_gna_slope_scale_t gna_slope(const double slope, const double in_scale, const double out_scale);

View File

@@ -294,6 +294,9 @@ void SubstituteSoftSignPass::run() {
};
auto getNthChild = [](CNNLayerPtr l, int N) {
auto first = getInputTo(l->outData.front()).begin();
auto last = getInputTo(l->outData.front()).end();
IE_ASSERT(first != last);
IE_ASSERT(N <= std::distance(first, last));
std::advance(first, N);
return first->second;
};
@@ -1119,6 +1122,7 @@ void EltwiseSplitOverChannelsPass::run() {
for (size_t k = 0; k != totalSplits; k++) {
auto eltwiseRaw = std::make_shared<EltwiseLayer>(
LayerParams{l->name + "/eltwise/" + std::to_string(k), "Eltwise", Precision::FP32});
IE_ASSERT(eltwiseRaw != nullptr);
eltwiseRaw->_operation = masterEltwise->_operation;
eltwiseRaw->coeff = masterEltwise->coeff;
auto eltwise = quantized ? InferenceEngine::injectData<QuantizedLayerParams>(eltwiseRaw) : eltwiseRaw;

View File

@@ -29,6 +29,7 @@ set(IE_BASE_SOURCE_FILES
${CMAKE_CURRENT_SOURCE_DIR}/ie_parameter.cpp
${CMAKE_CURRENT_SOURCE_DIR}/ie_rtti.cpp
${CMAKE_CURRENT_SOURCE_DIR}/precision_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/shape_infer/ie_built_in_holder.cpp
${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.hpp
${CMAKE_CURRENT_SOURCE_DIR}/system_allocator.cpp
@@ -123,6 +124,7 @@ add_library(${TARGET_NAME}_common_obj OBJECT
target_compile_definitions(${TARGET_NAME}_common_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
target_include_directories(${TARGET_NAME}_common_obj PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"
$<TARGET_PROPERTY:${TARGET_NAME}_transformations,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:${TARGET_NAME}_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>)

View File

@@ -22,6 +22,7 @@
#include <transformations/utils/utils.hpp>
#include <transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
#include <transformations/smart_reshape/smart_reshape.hpp>
#include "ngraph_ops/eltwise.hpp"
#include "exec_graph_info.hpp"
@@ -29,7 +30,7 @@
#include "ie_itt.hpp"
#include "network_serializer.hpp"
#include "generic_ie.hpp"
#include <legacy/shape_infer/built-in/ie_built_in_holder.hpp>
#include "shape_infer/ie_built_in_holder.hpp"
using namespace std;
using namespace InferenceEngine;
@@ -126,6 +127,10 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const std::shared_ptr<Function>& nGra
// Add shape infer method for old operations which are not included to opset1, opset2 and opset3
::ngraph::op::GenericIE::addExtension(_ngraph_function, std::make_shared<ShapeInfer::BuiltInShapeInferHolder>());
ngraph::pass::Manager ssr_manager;
ssr_manager.register_pass<ngraph::pass::SmartReshape>();
ssr_manager.run_passes(_ngraph_function);
reshape();
for (const auto& layer : _ngraph_function->get_parameters()) {
std::string outName = layer->get_friendly_name();

View File

@@ -31,12 +31,6 @@
#include <legacy/cnn_network_impl.hpp>
namespace InferenceEngine {
namespace ShapeInfer {
class Reshaper;
using ReshaperPtr = std::shared_ptr<Reshaper>;
} // namespace ShapeInfer
namespace details {
/**

View File

@@ -14,6 +14,7 @@
#include <vector>
#include "blob_factory.hpp"
#include "shape_infer/ie_ishape_infer_extension.hpp"
#include <legacy/ie_ngraph_utils.hpp>
#include "ngraph/util.hpp"
#include "ngraph/graph_util.hpp"

View File

@@ -12,6 +12,9 @@
#include <ie_core.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ngraph/opsets/opset.hpp>
#include <ngraph/ngraph.hpp>
#include <ngraph/graph_util.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <cpp_interfaces/exception2status.hpp>
#include "ie_plugin_cpp.hpp"
@@ -294,6 +297,23 @@ public:
QueryNetworkResult res;
auto parsed = parseDeviceNameIntoConfig(deviceName, config);
GetCPPPluginByName(parsed._deviceName).QueryNetwork(network, parsed._config, res);
if (!network.getFunction())
return res;
// WA for constant folded operations (plugins should support all folded ops)
const auto& func = network.getFunction();
auto specialized_function = ngraph::clone_function(*func);
ngraph::pass::ConstantFolding().run_on_function(specialized_function);
std::unordered_set<std::string> operationNames;
for (const auto& op : specialized_function->get_ops())
operationNames.emplace(op->get_friendly_name());
for (const auto& op : func->get_ops()) {
if (operationNames.find(op->get_friendly_name()) != operationNames.end())
continue;
res.supportedLayersMap[op->get_friendly_name()] = deviceName;
}
return res;
}

View File

@@ -0,0 +1,84 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <memory>
#include <string>
#include "shape_infer/ie_built_in_holder.hpp"
#include "shape_infer/ie_detectionoutput_onnx_shape_infer.hpp"
#include "shape_infer/ie_priorgridgenerator_onnx_shape_infer.hpp"
#include "shape_infer/ie_proposal_onnx_shape_infer.hpp"
#include "shape_infer/ie_proposal_shape_infer.hpp"
#include "shape_infer/ie_rnn_cell_shape_infer.hpp"
#include "shape_infer/ie_roifeatureextractor_onnx_shape_infer.hpp"
#include "shape_infer/ie_simpler_nms_shape_infer.hpp"
#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
#include "shape_infer/ie_topkrois_onnx_shape_infer.hpp"
#include "shape_infer/ie_unique_shape_infer.hpp"
#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
static ImplsHolder::Ptr localHolder;
if (localHolder == nullptr) {
localHolder = std::make_shared<ImplsHolder>();
}
return localHolder;
}
void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
GetImplsHolder()->list[name] = impl;
}
StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
auto& factories = GetImplsHolder()->list;
types = new char*[factories.size()];
size = 0;
for (auto it = factories.begin(); it != factories.end(); it++, size++) {
types[size] = new char[it->first.size() + 1];
std::copy(it->first.begin(), it->first.end(), types[size]);
types[size][it->first.size()] = '\0';
}
return OK;
}
StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
ResponseDesc* resp) noexcept {
auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
if (impls.find(type) != impls.end()) {
impl = impls[type];
return OK;
}
impl.reset();
return NOT_FOUND;
}
template <typename Impl>
class ImplRegisterBase {
public:
explicit ImplRegisterBase(const std::string& type) {
BuiltInShapeInferHolder::AddImpl(type, std::make_shared<Impl>(type));
}
};
#define REG_SHAPE_INFER_FOR_TYPE(__prim, __type) \
static ImplRegisterBase<__prim> __bi_reg__##__type(#__type)
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronDetectionOutputShapeProp, ExperimentalDetectronDetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronPriorGridGeneratorShapeProp, ExperimentalDetectronPriorGridGenerator);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronGenerateProposalsSingleImageShapeProp, ExperimentalDetectronGenerateProposalsSingleImage);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronROIFeatureExtractorShapeProp, ExperimentalDetectronROIFeatureExtractor);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronTopKROIsShapeProp, ExperimentalDetectronTopKROIs);
REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -13,17 +13,15 @@
#include <description_buffer.hpp>
#include "caseless.hpp"
#include <legacy/ie_ishape_infer_extension.hpp>
#include "shape_infer/ie_ishape_infer_extension.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
IE_SUPPRESS_DEPRECATED_START
/**
*@brief Holder of shape infer implementations for build-in IE layers, that plugins support out-of-the-box
*/
class INFERENCE_ENGINE_API_CLASS(BuiltInShapeInferHolder) : public IShapeInferExtension {
class BuiltInShapeInferHolder : public IShapeInferExtension {
struct ImplsHolder {
using Ptr = std::shared_ptr<ImplsHolder>;
InferenceEngine::details::caseless_map<std::string, IShapeInferImpl::Ptr> list;
@@ -48,7 +46,5 @@ private:
static ImplsHolder::Ptr GetImplsHolder();
};
IE_SUPPRESS_DEPRECATED_END
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -0,0 +1,145 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <shape_infer/ie_ishape_infer_extension.hpp>
#include <description_buffer.hpp>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
inline std::string GetParamAsString(const char* param, const std::map<std::string, std::string> & params) {
auto it = params.find(param);
if (it == params.end()) {
THROW_IE_EXCEPTION << "No such parameter name '" << param << "'";
}
return (*it).second;
}
inline int GetParamAsInt(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
<< val << " cannot be casted to int.";
}
}
inline bool GetParamAsBool(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return static_cast<char>(std::tolower(value));
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, params) != 0);
}
return result;
}
std::string GetParamAsString(const char* param, const char* def,
const std::map<std::string, std::string> & params) {
auto it = params.find(param);
if (it == params.end() || it->second.empty()) {
return def;
}
return (*it).second;
}
int GetParamAsInt(const char* param, int def,
const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
<< val << " cannot be casted to int.";
}
}
bool GetParamAsBool(const char* param, bool def,
const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return static_cast<char>(std::tolower(value));
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, def, params) != 0);
}
return result;
}
inline unsigned int GetParamAsUInt(const char* param, const std::map<std::string, std::string> & params) {
std::string val = GetParamAsString(param, params);
std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer" +
". Value " + val + " cannot be casted to unsigned int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
namespace ShapeInfer {
/**
* @brief Base class for all built-in shape infer implementations. Contains common logic with validators and errors
* handling
*/
class BuiltInShapeInferImpl : public IShapeInferImpl {
public:
explicit BuiltInShapeInferImpl(const std::string& type): _type(type) { }
virtual void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) = 0;
StatusCode inferShapes(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes,
ResponseDesc* resp) noexcept override {
inShapes.clear();
for (const auto& blob : inBlobs) {
inShapes.push_back(blob->getTensorDesc().getDims());
}
outShapes.clear();
try {
inferShapesImpl(inBlobs, params, blobs, outShapes);
return OK;
} catch (const std::exception& ex) {
return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
} catch (...) {
return InferenceEngine::DescriptionBuffer(UNEXPECTED) << "Unknown error";
}
}
protected:
std::string _type;
std::vector<SizeVector> inShapes;
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -15,8 +15,8 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronDetectionOutput layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronDetectionOutput layer
*/
class ExperimentalDetectronDetectionOutputShapeProp : public BuiltInShapeInferImpl {
protected:
const int ROIS = 0;
@@ -27,17 +27,12 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto rois_num = cnnLayer.GetParamAsUInt("max_detections_per_image");
auto rois_num = GetParamAsUInt("max_detections_per_image", params);
outShapes.push_back({rois_num, 4});
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
if (num_outputs > 3) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
auto num_outputs = GetParamAsUInt("num_outputs", params);
if (num_outputs > 3)
THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
if (num_outputs >= 2) {
outShapes.push_back({rois_num});
}

View File

@@ -18,8 +18,8 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronPriorGridGenerator layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronPriorGridGenerator layer
*/
class ExperimentalDetectronPriorGridGeneratorShapeProp : public BuiltInShapeInferImpl {
protected:
const int PRIORS = 0;
@@ -32,19 +32,13 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
const auto& priors_shape = inShapes.at(PRIORS);
const auto priors_num = priors_shape.at(0);
const auto& featmap_shape = inShapes.at(FEATMAP);
const auto grid_height = featmap_shape.at(H);
const auto grid_width = featmap_shape.at(W);
const bool flatten = cnnLayer.GetParamAsBool("flatten", true);
const bool flatten = GetParamAsBool("flatten", true, params);
if (flatten) {
outShapes.push_back({grid_height * grid_width * priors_num, 4});
} else {

View File

@@ -15,21 +15,15 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronGenerateProposalsSingleImage layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronGenerateProposalsSingleImage layer
*/
class ExperimentalDetectronGenerateProposalsSingleImageShapeProp : public BuiltInShapeInferImpl {
public:
explicit ExperimentalDetectronGenerateProposalsSingleImageShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto post_nms_count = cnnLayer.GetParamAsUInt("post_nms_count");
auto post_nms_count = GetParamAsUInt("post_nms_count", params);
outShapes.push_back({post_nms_count, 4});
outShapes.push_back({post_nms_count, });
}

View File

@@ -15,7 +15,7 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Proposal layer
* @brief Implementation of Shape inference for Proposal layer
*/
class ProposalShapeProp : public BuiltInShapeInferImpl {
public:
@@ -23,14 +23,12 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t post_nms_topn = static_cast<size_t>(cnnLayer.GetParamAsInt("post_nms_topn"));
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
if (num_outputs > 2) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
auto num_outputs = GetParamAsUInt("num_outputs", params);
if (num_outputs > 2)
THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
outShapes.push_back({inShapes[0][0] * post_nms_topn, 5});
if (num_outputs == 2)
outShapes.push_back({inShapes[0][0] * post_nms_topn});

View File

@@ -16,29 +16,24 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for DetectionOutput layer
* @brief Implementation of Shape inference for DetectionOutput layer
*/
template <class CELL, int S>
template <int S>
class RNNBaseCellShapeProp : public BuiltInShapeInferImpl {
public:
explicit RNNBaseCellShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CELL cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto state_dims = inShapes[1];
for (int i = 0; i < S; i++) outShapes.push_back(state_dims);
for (int i = 0; i < S; i++)
outShapes.push_back(state_dims);
}
};
using RNNCellShapeProp = RNNBaseCellShapeProp<RNNCell, 1>;
using GRUCellShapeProp = RNNBaseCellShapeProp<GRUCell, 1>;
using LSTMCellShapeProp = RNNBaseCellShapeProp<LSTMCell, 2>;
using RNNCellShapeProp = RNNBaseCellShapeProp<1>;
using GRUCellShapeProp = RNNBaseCellShapeProp<1>;
using LSTMCellShapeProp = RNNBaseCellShapeProp<2>;
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -15,8 +15,8 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronROIFeatureExtractor layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronROIFeatureExtractor layer
*/
class ExperimentalDetectronROIFeatureExtractorShapeProp : public BuiltInShapeInferImpl {
protected:
const int ROIS = 0;
@@ -27,18 +27,12 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t rois_num = inShapes.at(ROIS).at(0);
size_t channels_num = inShapes.at(FEATMAPS).at(1);
size_t output_size = static_cast<size_t>(cnnLayer.GetParamAsInt("output_size"));
size_t output_size = static_cast<size_t>(GetParamAsInt("output_size", params));
outShapes.push_back({rois_num, channels_num, output_size, output_size});
auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
auto num_outputs = GetParamAsUInt("num_outputs", params);
if (num_outputs > 2) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
if (num_outputs == 2) {
outShapes.push_back({rois_num, 4});

View File

@@ -18,7 +18,7 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SimplerNMS layer
* @brief Implementation of Shape inference for SimplerNMS layer
*/
class SimplerNMSShapeProp : public BuiltInShapeInferImpl {
public:
@@ -26,13 +26,7 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
size_t post_nms_topn = static_cast<size_t>(cnnLayer.GetParamAsInt("post_nms_topn"));
size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
outShapes.push_back({post_nms_topn, 5});
}
};

View File

@@ -14,22 +14,16 @@ namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SparseToDense layer
* @brief Implementation of Shape inference for SparseToDense layer
*/
class SparseToDenseShapeProp : public BuiltInShapeInferImpl {
public:
explicit SparseToDenseShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs,
std::vector<SizeVector>& outShapes) override {
LayerParams lp{};
SparseToDenseLayer sparse_to_dense_layer(lp);
sparse_to_dense_layer.params = params;
sparse_to_dense_layer.type = _type;
validate(&sparse_to_dense_layer, inBlobs, params, blobs);
const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs,
std::vector<SizeVector>& outShapes) override {
SizeVector shapes;
if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
auto* buffer = inBlobs[1]->cbuffer().as<int*>();

View File

@@ -4,35 +4,27 @@
#pragma once
#include <ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "shape_infer/ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalDetectronTopKROIs layer
*/
* @brief Implementation of Shape inference for ExperimentalDetectronTopKROIs layer
*/
class ExperimentalDetectronTopKROIsShapeProp : public BuiltInShapeInferImpl {
public:
explicit ExperimentalDetectronTopKROIsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
const auto max_rois = cnnLayer.GetParamAsUInt("max_rois");
const auto max_rois = GetParamAsUInt("max_rois", params);
outShapes.push_back({max_rois, 4});
}
};

View File

@@ -9,13 +9,13 @@
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "shape_infer/ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Unique layer
* @brief Implementation of Shape inference for Unique layer
*/
class UniqueShapeProp : public BuiltInShapeInferImpl {
public:
@@ -23,18 +23,15 @@ public:
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
UniqueLayer unique_layer(lp);
unique_layer.params = params;
unique_layer.type = _type;
validate(&unique_layer, inBlobs, params, blobs);
bool return_inverse = GetParamAsBool("return_inverse", params);
bool return_counts = GetParamAsBool("return_counts", params);
// compute a number of outputs
size_t num_outputs = 1;
if (unique_layer.return_counts) {
if (return_counts) {
num_outputs++;
}
if (unique_layer.return_inverse) {
if (return_inverse) {
num_outputs++;
}

View File

@@ -18,14 +18,12 @@
#include "description_buffer.hpp"
#include <legacy/ie_layers.h>
#include <legacy/ie_ishape_infer_extension.hpp>
namespace InferenceEngine {
namespace ShapeInfer {
class Reshaper;
using ReshaperPtr = std::shared_ptr<Reshaper>;
} // namespace ShapeInfer
class IShapeInferExtension;
using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
namespace details {
class INFERENCE_ENGINE_API_CLASS(CNNNetworkImpl): public ICNNNetwork {
@@ -126,9 +124,6 @@ public:
StatusCode reshape(const std::map<std::string, std::vector<size_t>>& inputShapes,
ResponseDesc* resp) noexcept override;
StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension,
InferenceEngine::ResponseDesc* resp) noexcept;
StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept override;
@@ -139,7 +134,6 @@ protected:
std::map<std::string, DataPtr> _outputData;
std::string _name;
DataPtr _emptyData;
ShapeInfer::ReshaperPtr _reshaper;
};
typedef std::shared_ptr<CNNNetworkImpl> CNNNetworkImplPtr;

View File

@@ -29,7 +29,6 @@
#include "legacy/details/ie_cnn_network_tools.h"
#include <legacy/cnn_network_impl.hpp>
#include "network_serializer_v7.hpp"
#include <shape_infer/ie_reshaper.hpp>
using namespace std;
using namespace InferenceEngine;
@@ -364,31 +363,24 @@ size_t CNNNetworkImpl::getBatchSize() const noexcept {
StatusCode CNNNetworkImpl::reshape(const std::map<std::string, std::vector<size_t>>& inputShapes,
ResponseDesc* responseDesc) noexcept {
try {
if (!_reshaper) _reshaper = std::make_shared<ShapeInfer::Reshaper>(*this);
_reshaper->run(inputShapes);
} catch (const InferenceEngineException& e) {
return DescriptionBuffer(GENERAL_ERROR, responseDesc) << e.what();
} catch (const std::exception& e) {
return DescriptionBuffer(UNEXPECTED, responseDesc) << e.what();
} catch (...) {
return DescriptionBuffer(UNEXPECTED, responseDesc);
for (const auto& pair : _inputData) {
auto info = pair.second;
if (info) {
auto data = info->getInputData();
auto it = inputShapes.find(pair.first);
if (data && it != inputShapes.end()) {
auto newDims = it->second;
auto currentDims = data->getTensorDesc().getDims();
if (newDims != currentDims) {
return DescriptionBuffer(NOT_IMPLEMENTED, responseDesc) <<
"You have called setBatchSize + reshape for CNNNetwork object. Please, either: \n"
"- [SUGGESTED] Regenerate IR with current version of Model Optimizer\n"
"- [WORKAROUND] Call only reshape method where proper batch is already set\n";
}
}
}
}
return OK;
}
StatusCode CNNNetworkImpl::AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension,
InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (!_reshaper) _reshaper = std::make_shared<ShapeInfer::Reshaper>(*this);
_reshaper->AddExtension(extension);
} catch (const InferenceEngineException& e) {
return DescriptionBuffer(GENERAL_ERROR, resp) << e.what();
} catch (const std::exception& e) {
return DescriptionBuffer(UNEXPECTED, resp) << e.what();
} catch (...) {
return DescriptionBuffer(UNEXPECTED, resp);
}
return OK;
}

View File

@@ -669,6 +669,8 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
[](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string>& params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), node->description(), details::convertPrecision(node->get_output_element_type(0))};
auto reduce_node = std::dynamic_pointer_cast<ngraph::op::util::ArithmeticReductionKeepDims>(node);
if (reduce_node == nullptr)
THROW_IE_EXCEPTION << "Node '" << node->get_name() << "' is not an instance of ArithmeticReductionKeepDims.";
auto res = std::make_shared<InferenceEngine::ReduceLayer>(attrs);
res->params = params;
res->params["keep_dims"] = reduce_node->get_keep_dims() ? "True" : "False";
@@ -678,6 +680,8 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
addSpecificCreator({"ReduceLogicalAnd"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string>& params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), "ReduceAnd", details::convertPrecision(node->get_output_element_type(0))};
auto reduce_node = std::dynamic_pointer_cast<ngraph::op::util::LogicalReductionKeepDims>(node);
if (reduce_node == nullptr)
THROW_IE_EXCEPTION << "Node '" << node->get_name() << "' is not an instance of LogicalReductionKeepDims.";
auto res = std::make_shared<InferenceEngine::ReduceLayer>(attrs);
res->params = params;
res->params["keep_dims"] = reduce_node->get_keep_dims() ? "True" : "False";
@@ -687,6 +691,8 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
addSpecificCreator({"ReduceLogicalOr"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string>& params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), "ReduceOr", details::convertPrecision(node->get_output_element_type(0))};
auto reduce_node = std::dynamic_pointer_cast<ngraph::op::util::LogicalReductionKeepDims>(node);
if (reduce_node == nullptr)
THROW_IE_EXCEPTION << "Node '" << node->get_name() << "' is not an instance of LogicalReductionKeepDims.";
auto res = std::make_shared<InferenceEngine::ReduceLayer>(attrs);
res->params = params;
res->params["keep_dims"] = reduce_node->get_keep_dims() ? "True" : "False";
@@ -1105,10 +1111,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
i.second->setLayout(thisInputData.getLayout());
i.second->getPreProcess() = thisInputData.getPreProcess();
}
for (const auto &ext : ::ngraph::op::GenericIE::getExtensions(graph)) {
cnnNetworkImpl->AddExtension(ext, nullptr);
}
}
std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function> &graph,

View File

@@ -398,8 +398,10 @@ bool convertToRNNSeq(CNNLayerPtr cur, const N& net) {
IE_ASSERT(cell->insData.size() == NS + 1); // {data, state1, [state2]}
IE_ASSERT(cell->outData.size() == NS); // {state1, [state2]}
auto outData0InputsTo = getInputTo(cell->outData[0]);
if (getCreatorLayer(cell->insData[0].lock()).lock() != rsp1 ||
getInputTo(cell->outData[0]).begin()->second != rsp2)
outData0InputsTo.empty() ||
outData0InputsTo.begin()->second != rsp2)
return false;
// Check port mapping

View File

@@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ArgMax layer
*/
class ArgMaxShapeProp : public BuiltInShapeInferImpl {
public:
explicit ArgMaxShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
auto out_max_val = static_cast<size_t>(cnnLayer.GetParamAsInt("out_max_val", 0));
auto top_k = static_cast<size_t>(cnnLayer.GetParamAsInt("top_k", 0));
int axis = 0;
bool isValidAxis = true;
try {
axis = cnnLayer.GetParamAsInt("axis");
} catch (const details::InferenceEngineException& exception) {
isValidAxis = false;
}
auto firstInputShape = inShapes[0];
size_t num_top_axes = firstInputShape.size();
if (num_top_axes < 3) num_top_axes = 3;
SizeVector outputShape(num_top_axes, 1lu);
if (isValidAxis) {
if (axis < 0) {
axis = static_cast<int>(firstInputShape.size() + axis);
}
outputShape = firstInputShape;
outputShape[axis] = top_k;
} else {
outputShape[0] = firstInputShape[0];
outputShape[2] = top_k;
if (out_max_val) {
outputShape[1] = 2;
}
}
outShapes.push_back(outputShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,78 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <debug.h>
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for BinaryConvolution layer
*/
class BinConvShapeProp : public BuiltInShapeInferImpl {
public:
    explicit BinConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Infers the BinaryConvolution output shape {N, out_depth, [D,] H, W} from the
     * layer's kernel/stride/dilation/padding attributes. Supports auto_pad modes
     * "valid", "same_upper", "same_lower" and (default branch) explicit pads.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        BinaryConvolutionLayer binConvLayer(lp);
        binConvLayer.params = params;
        binConvLayer.type = _type;
        validate(&binConvLayer, inBlobs, params, blobs);
        auto dims = inShapes[0];
        // Computes one output spatial dim for the given axis (X/Y/Z constants come
        // from legacy headers — NOTE(review): assumed to index _kernel/_stride/etc.).
        auto computeSpatialShape = [&](size_t inDim, int axis) {
            // effective kernel extent; dilation==0 is treated as "no dilation"
            size_t kernel = 0;
            if (binConvLayer._dilation[axis])
                kernel = (binConvLayer._kernel[axis] - 1) * binConvLayer._dilation[axis] + 1;
            else
                kernel = binConvLayer._kernel[axis];
            size_t stride = binConvLayer._stride[axis];
            size_t pad = binConvLayer._padding[axis];
            float outDim;
            std::string padType = binConvLayer._auto_pad;
            if (padType == "valid") {
                outDim = std::ceil((inDim - kernel + 1.f) / stride);
            } else if (padType == "same_upper") {
                outDim = std::ceil(1.f * inDim / stride);
            } else if (padType == "same_lower") {
                outDim = std::floor(1.f * inDim / stride);
            } else {
                // explicit padding: floor((in + pad_begin + pad_end - kernel) / stride) + 1
                int padEnd = binConvLayer._pads_end[axis];
                outDim = std::floor(1.f * (inDim + pad + padEnd - kernel) / stride) + 1.f;
            }
            if (outDim < 0)
                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
            return static_cast<size_t>(outDim);
        };
        size_t inputN = dims[0];
        size_t OC = binConvLayer._out_depth;
        SizeVector shapes;
        shapes.push_back(inputN);
        shapes.push_back(OC);
        // 5D input means a 3D convolution: emit depth (Z) before height/width
        if (dims.size() == 5) shapes.push_back(computeSpatialShape(dims[dims.size() - 3], Z_AXIS));
        shapes.push_back(computeSpatialShape(dims[dims.size() - 2], Y_AXIS));
        shapes.push_back(computeSpatialShape(dims[dims.size() - 1], X_AXIS));
        outShapes.push_back(shapes);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,80 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "precision_utils.h"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Broadcast layer
*/
class BroadcastShapeProp : public BuiltInShapeInferImpl {
public:
    explicit BroadcastShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Reads the target shape from the second input blob and reports it as the
     * single output shape. Supported precisions for input #1: I32, FP32, FP16,
     * I64, U64. Throws if the shape blob has no allocated data.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        BroadcastLayer broadcastLayer(lp);
        broadcastLayer.params = params;
        broadcastLayer.type = _type;
        validate(&broadcastLayer, inBlobs, params, blobs);
        SizeVector shapes;
        if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
            auto* buffer = inBlobs[1]->cbuffer().as<int*>();
            if (buffer != nullptr) {
                shapes.assign(buffer, buffer + inBlobs[1]->size());
            } else {
                THROW_IE_EXCEPTION << "Second input must have allocated data";
            }
        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP32) {
            auto* buffer = inBlobs[1]->cbuffer().as<float*>();
            if (buffer != nullptr) {
                for (size_t i = 0; i < inBlobs[1]->size(); i++) {
                    shapes.push_back(static_cast<int>(buffer[i]));
                }
            } else {
                THROW_IE_EXCEPTION << "Second input must have allocated data";
            }
        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP16) {
            auto* buffer = inBlobs[1]->cbuffer().as<uint16_t*>();
            if (buffer != nullptr) {
                for (size_t i = 0; i < inBlobs[1]->size(); i++) {
                    shapes.push_back(static_cast<int>(PrecisionUtils::f16tof32(buffer[i])));
                }
            } else {
                // bug fix: previously this branch silently produced an empty shape
                // on a null buffer instead of throwing like every other branch
                THROW_IE_EXCEPTION << "Second input must have allocated data";
            }
        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I64) {
            auto* buffer = inBlobs[1]->cbuffer().as<int64_t*>();
            if (buffer != nullptr) {
                shapes.assign(buffer, buffer + inBlobs[1]->size());
            } else {
                THROW_IE_EXCEPTION << "Second input must have allocated data";
            }
        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::U64) {
            auto* buffer = inBlobs[1]->cbuffer().as<uint64_t*>();
            if (buffer != nullptr) {
                shapes.assign(buffer, buffer + inBlobs[1]->size());
            } else {
                THROW_IE_EXCEPTION << "Second input must have allocated data";
            }
        } else {
            // bug fix: message now matches the precisions actually accepted above
            THROW_IE_EXCEPTION << "Second input must have I32, I64, U64, FP32 or FP16 precision";
        }
        outShapes = {shapes};
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,43 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_built_in_impl.hpp"
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Bucketize layer
*/
class BucketizeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit BucketizeShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}

    /// Bucketize is element-wise over its first input: the single output
    /// has exactly the shape of input #0.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
                         const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs,
                         std::vector<SizeVector>& outShapes) override {
        LayerParams emptyParams{};
        BucketizeLayer layer(emptyParams);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);
        // one output, mirroring the data input's shape
        outShapes.assign(1, inShapes[0]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,264 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <memory>
#include <string>
#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
#include "ie_argmax_shape_infer.hpp"
#include "ie_bin_conv_shape_infer.hpp"
#include "ie_broadcast_shape_infer.hpp"
#include "ie_concat_shape_infer.hpp"
#include "ie_conv_shape_infer.hpp"
#include "ie_crop_shape_infer.hpp"
#include "ie_ctc_greedy_decoder_shape_infer.hpp"
#include "ie_deconv_shape_infer.hpp"
#include "ie_deformable_conv_shape_infer.hpp"
#include "ie_depth_to_space_shape_infer.hpp"
#include "ie_detectionoutput_onnx_shape_infer.hpp"
#include "ie_detection_output_shape_infer.hpp"
#include "ie_eltwise_shape_infer.hpp"
#include "ie_equal_shape_infer.hpp"
#include "ie_erf_shape_infer.hpp"
#include "ie_fill_shape_infer.hpp"
#include "ie_flatten_shape_infer.hpp"
#include "ie_gather_shape_infer.hpp"
#include "ie_gather_tree_shape_infer.hpp"
#include "ie_gemm_shape_infer.hpp"
#include "ie_inner_product_shape_infer.hpp"
#include "ie_interp_shape_infer.hpp"
#include "ie_non_max_suppression_shape_infer.hpp"
#include "ie_one_hot_shape_infer.hpp"
#include "ie_pad_shape_infer.hpp"
#include "ie_permute_shape_infer.hpp"
#include "ie_pool_shape_infer.hpp"
#include "ie_priorbox_clustered_shape_infer.hpp"
#include "ie_priorbox_shape_infer.hpp"
#include "ie_priorgridgenerator_onnx_shape_infer.hpp"
#include "ie_proposal_onnx_shape_infer.hpp"
#include "ie_proposal_shape_infer.hpp"
#include "ie_psroi_pooling_shape_infer.hpp"
#include "ie_quantize_shape_infer.hpp"
#include "ie_range_shape_infer.hpp"
#include "ie_reduce_shape_infer.hpp"
#include "ie_region_yolo_shape_infer.hpp"
#include "ie_reorg_yolo_shape_infer.hpp"
#include "ie_resample_shape_infer.hpp"
#include "ie_reshape_shape_infer.hpp"
#include "ie_reverse_sequence_shape_infer.hpp"
#include "ie_rnn_cell_shape_infer.hpp"
#include "ie_rnn_shape_infer.hpp"
#include "ie_roi_pooling_shape_infer.hpp"
#include "ie_roifeatureextractor_onnx_shape_infer.hpp"
#include "ie_scatter_shape_infer.hpp"
#include "ie_select_shape_infer.hpp"
#include "ie_shape_shape_infer.hpp"
#include "ie_shuffle_channels_shape_infer.hpp"
#include "ie_simpler_nms_shape_infer.hpp"
#include "ie_space_to_depth_shape_infer.hpp"
#include "ie_sparse_fill_empty_rows_shape_infer.hpp"
#include "ie_sparse_segment_reduce_shape_infer.hpp"
#include "ie_split_shape_infer.hpp"
#include "ie_sparse_to_dense_shape_infer.hpp"
#include "ie_bucketize_shape_infer.hpp"
#include "ie_squeeze_shape_infer.hpp"
#include "ie_sparse_weighted_reduce_shape_infer.hpp"
#include "ie_strided_slice_shape_infer.hpp"
#include "ie_tensor_iterator_shape_infer.hpp"
#include "ie_tile_shape_infer.hpp"
#include "ie_topk_shape_infer.hpp"
#include "ie_topkrois_onnx_shape_infer.hpp"
#include "ie_unique_shape_infer.hpp"
#include "ie_unsqueeze_shape_infer.hpp"
#include "ie_upsampling_shape_infer.hpp"
#include "impl_register.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
    // Meyers singleton: initialization of a function-local static is guaranteed
    // thread-safe since C++11. The previous "declare empty, then check-for-null
    // and assign" pattern could double-initialize under concurrent first calls.
    static ImplsHolder::Ptr localHolder = std::make_shared<ImplsHolder>();
    return localHolder;
}
IE_SUPPRESS_DEPRECATED_START
void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
    // Register (or overwrite) the shape-infer implementation for this layer type.
    auto& registry = GetImplsHolder()->list;
    registry[name] = impl;
}
// Fills `types` with a newly allocated array of NUL-terminated layer-type names
// and sets `size` to the number of entries.
// NOTE(review): ownership of the allocated array and each string transfers to
// the caller — confirm against the IShapeInferExtension contract before changing.
StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
    auto& factories = GetImplsHolder()->list;
    types = new char*[factories.size()];
    size = 0;
    for (auto it = factories.begin(); it != factories.end(); it++, size++) {
        // +1 for the terminating NUL, appended manually below
        types[size] = new char[it->first.size() + 1];
        std::copy(it->first.begin(), it->first.end(), types[size]);
        types[size][it->first.size()] = '\0';
    }
    return OK;
}
// Looks up the built-in shape-infer implementation registered for `type`.
// Returns OK and sets `impl` on success; resets `impl` and returns NOT_FOUND otherwise.
StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
                                                      ResponseDesc* resp) noexcept {
    auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
    // single lookup instead of the previous find() followed by operator[]
    auto it = impls.find(type);
    if (it != impls.end()) {
        impl = it->second;
        return OK;
    }
    impl.reset();
    return NOT_FOUND;
}
IE_SUPPRESS_DEPRECATED_END
// Register without implementation just to protect from adding custom implementation for them
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Input);
REG_SHAPE_INFER_FOR_TYPE(DoNothingShapeProp, Output);
REG_SHAPE_INFER_FOR_TYPE(MemoryShapeProp, Memory);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Const);
// Outputs = Inputs
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Activation);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ReLU);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ReLU6);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ELU);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, TanH);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Logistic);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Sigmoid);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, PReLU);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, SoftMax);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, LogSoftMax);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, LRN);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Norm);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Normalize);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Convert);
// FIXME: Really Copy??? New MO doesn't generate this layer
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Copy);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Power);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, PowerFile);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Clamp);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ScaleShift);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, BatchNormalization);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, GRN);
REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, MVN);
// Convolution-family and other layers with shape-transforming implementations
REG_SHAPE_INFER_FOR_TYPE(ConvShapeProp, Convolution);
REG_SHAPE_INFER_FOR_TYPE(DeconvShapeProp, Deconvolution);
REG_SHAPE_INFER_FOR_TYPE(DeformableConvShapeProp, DeformableConvolution);
REG_SHAPE_INFER_FOR_TYPE(PoolingShapeProp, Pooling);
REG_SHAPE_INFER_FOR_TYPE(InnerProductShapeProp, InnerProduct);
REG_SHAPE_INFER_FOR_TYPE(InnerProductShapeProp, FullyConnected);
REG_SHAPE_INFER_FOR_TYPE(SplitShapeProp, Split);
REG_SHAPE_INFER_FOR_TYPE(SplitShapeProp, Slice);
REG_SHAPE_INFER_FOR_TYPE(PermuteShapeProp, Permute);
REG_SHAPE_INFER_FOR_TYPE(FlattenShapeProp, Flatten);
REG_SHAPE_INFER_FOR_TYPE(ReshapeShapeProp, Reshape);
REG_SHAPE_INFER_FOR_TYPE(DetectionOutputShapeProp, DetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(PriorBoxClusteredShapeProp, PriorBoxClustered);
REG_SHAPE_INFER_FOR_TYPE(PriorBoxShapeProp, PriorBox);
// ONNX ExperimentalDetectron* operations
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronDetectionOutputShapeProp, ExperimentalDetectronDetectionOutput);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronPriorGridGeneratorShapeProp, ExperimentalDetectronPriorGridGenerator);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronGenerateProposalsSingleImageShapeProp, ExperimentalDetectronGenerateProposalsSingleImage);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronROIFeatureExtractorShapeProp, ExperimentalDetectronROIFeatureExtractor);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronTopKROIsShapeProp, ExperimentalDetectronTopKROIs);
REG_SHAPE_INFER_FOR_TYPE(RoiPoolingShapeProp, ROIPooling);
REG_SHAPE_INFER_FOR_TYPE(PSRoiPoolingShapeProp, PSROIPooling);
REG_SHAPE_INFER_FOR_TYPE(UpsamplingShapeProp, Upsampling);
REG_SHAPE_INFER_FOR_TYPE(ResampleShapeProp, Resample);
REG_SHAPE_INFER_FOR_TYPE(InterpShapeProp, Interp);
REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
REG_SHAPE_INFER_FOR_TYPE(TileShapeProp, Tile);
REG_SHAPE_INFER_FOR_TYPE(CropShapeProp, Crop);
REG_SHAPE_INFER_FOR_TYPE(ConcatShapeProp, Concat);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Eltwise);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Mul);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Add);
REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Div);
REG_SHAPE_INFER_FOR_TYPE(CTCGreedyDecoderShapeProp, CTCGreedyDecoder);
REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
REG_SHAPE_INFER_FOR_TYPE(ReorgYoloShapeProp, ReorgYolo);
REG_SHAPE_INFER_FOR_TYPE(RegionYoloShapeProp, RegionYolo);
// Recurrent sequences and cells
REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, RNNSequence);
REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, GRUSequence);
REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, LSTMSequence);
REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
REG_SHAPE_INFER_FOR_TYPE(LSTMCellShapeProp, LSTMCell);
REG_SHAPE_INFER_FOR_TYPE(TensorIteratorShapeProp, TensorIterator);
REG_SHAPE_INFER_FOR_TYPE(ArgMaxShapeProp, ArgMax);
REG_SHAPE_INFER_FOR_TYPE(GemmShapeProp, Gemm);
REG_SHAPE_INFER_FOR_TYPE(PadShapeProp, Pad);
REG_SHAPE_INFER_FOR_TYPE(GatherShapeProp, Gather);
REG_SHAPE_INFER_FOR_TYPE(StridedSliceShapeProp, StridedSlice);
REG_SHAPE_INFER_FOR_TYPE(ShuffleChannelsShapeProp, ShuffleChannels);
REG_SHAPE_INFER_FOR_TYPE(DepthToSpaceShapeProp, DepthToSpace);
REG_SHAPE_INFER_FOR_TYPE(SpaceToDepthShapeProp, SpaceToDepth);
// Sparse operations
REG_SHAPE_INFER_FOR_TYPE(SparseFillEmptyRowsShapeProp, SparseFillEmptyRows);
REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentMean);
REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentSqrtN);
REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentSum);
REG_SHAPE_INFER_FOR_TYPE(ExperimentalSparseWeightedReduceShapeProp, ExperimentalSparseWeightedSum);
REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
REG_SHAPE_INFER_FOR_TYPE(BucketizeShapeProp, Bucketize);
REG_SHAPE_INFER_FOR_TYPE(ReverseSequenceShapeProp, ReverseSequence);
REG_SHAPE_INFER_FOR_TYPE(SelectShapeProp, Select);
REG_SHAPE_INFER_FOR_TYPE(SqueezeShapeProp, Squeeze);
REG_SHAPE_INFER_FOR_TYPE(UnsqueezeShapeProp, Unsqueeze);
REG_SHAPE_INFER_FOR_TYPE(RangeShapeProp, Range);
REG_SHAPE_INFER_FOR_TYPE(FillShapeProp, Fill);
REG_SHAPE_INFER_FOR_TYPE(BroadcastShapeProp, Broadcast);
REG_SHAPE_INFER_FOR_TYPE(ShapeShapeProp, Shape);
REG_SHAPE_INFER_FOR_TYPE(OneHotShapeProp, OneHot);
REG_SHAPE_INFER_FOR_TYPE(QuantizeShapeProp, FakeQuantize);
REG_SHAPE_INFER_FOR_TYPE(BinConvShapeProp, BinaryConvolution);
// Element-wise math operations (output shape equals input shape)
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Abs);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Acos);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Acosh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Asin);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Asinh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Atan);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Atanh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Ceil);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Cos);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Cosh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Erf);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Floor);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, HardSigmoid);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Log);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Exp);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Neg);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Reciprocal);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Selu);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sign);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sin);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sinh);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Softplus);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Softsign);
REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Tan);
// Reductions
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceAnd);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceL1);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceL2);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceLogSum);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceLogSumExp);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMax);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMean);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMin);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceOr);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceProd);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceSum);
REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceSumSquare);
REG_SHAPE_INFER_FOR_TYPE(GatherTreeShapeProp, GatherTree);
REG_SHAPE_INFER_FOR_TYPE(TopKShapeProp, TopK);
REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
REG_SHAPE_INFER_FOR_TYPE(NMSShapeProp, NonMaxSuppression);
REG_SHAPE_INFER_FOR_TYPE(ScatterUpdateShapeProp, ScatterUpdate);
REG_SHAPE_INFER_FOR_TYPE(ScatterElementsUpdateShapeProp, ScatterElementsUpdate);
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,71 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <ie_layer_validators.hpp>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
IE_SUPPRESS_DEPRECATED_START
/**
*@brief Base class for all built-in shape infer implementations. Contains common logic with validators and errors
*handling
*/
class BuiltInShapeInferImpl : public IShapeInferImpl {
public:
    explicit BuiltInShapeInferImpl(const std::string& type): _type(type) {
        _validator = details::LayerValidators::getInstance()->getValidator(_type);
        if (!_validator)
            THROW_IE_EXCEPTION << "Internal error: failed to find validator for layer with type: " << _type;
    }

    // Parses the layer's parameters via the registered validator.
    // NOTE(review): inBlobs/params/blobs are currently unused here; kept for
    // interface compatibility with derived implementations.
    void validate(CNNLayer* layer, const std::vector<Blob::CPtr>& inBlobs,
                  const std::map<std::string, std::string>& params, const std::map<std::string, Blob::Ptr>& blobs) {
        _validator->parseParams(layer);
    }

    // Derived classes compute output shapes; input dims are available in `inShapes`.
    virtual void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
                                 const std::map<std::string, std::string>& params,
                                 const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) = 0;

    // noexcept ABI wrapper: snapshots input dims, delegates to inferShapesImpl,
    // and converts any exception into a StatusCode + ResponseDesc message.
    StatusCode inferShapes(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                           const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes,
                           ResponseDesc* resp) noexcept override {
        inShapes.clear();
        for (const auto& blob : inBlobs) {
            inShapes.push_back(blob->getTensorDesc().getDims());
        }
        outShapes.clear();
        try {
            inferShapesImpl(inBlobs, params, blobs, outShapes);
            return OK;
        } catch (const std::exception& ex) {
            return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
        } catch (...) {
            // bug fix: pass `resp` so "Unknown error" actually reaches the caller's
            // ResponseDesc (previously the description was constructed and dropped)
            return InferenceEngine::DescriptionBuffer(UNEXPECTED, resp) << "Unknown error";
        }
    }

protected:
    std::string _type;                      // layer type this implementation serves
    details::LayerValidator::Ptr _validator;  // never null after construction
    std::vector<SizeVector> inShapes;       // dims of inBlobs, refreshed per inferShapes call
};
IE_SUPPRESS_DEPRECATED_END
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,44 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Concat layer
*/
class ConcatShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ConcatShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Output shape equals the first input's shape with the concatenation axis
     * replaced by the sum of all inputs' sizes along that axis.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ConcatLayer concatLayer(lp);
        concatLayer.params = params;
        concatLayer.type = _type;
        validate(&concatLayer, inBlobs, params, blobs);
        size_t sum(0);
        size_t axis = concatLayer._axis;
        outShapes.push_back(inShapes[0]);
        for (const auto& inShape : inShapes) {
            // bug fix: the old message claimed the limit was the "number of input
            // shapes", but the check compares the axis with each input's rank
            if (axis >= inShape.size())
                THROW_IE_EXCEPTION << "Axis can't be larger than the rank of the input shape";
            sum += inShape[axis];
        }
        outShapes[0][axis] = sum;
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,82 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Convolution layer
*/
class ConvShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Infers the Convolution output shape {N, out_depth, spatial...} from
     * kernel/stride/dilation/padding. Supports auto_pad "valid", "same_upper",
     * "same_lower" and explicit pads (default branch).
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ConvolutionLayer convLayer(lp);
        convLayer.params = params;
        convLayer.type = _type;
        validate(&convLayer, inBlobs, params, blobs);
        auto dims = inShapes[0];
        auto dims_size = dims.size();
        auto spacial_d_size = dims.size() - 2;
        // RAII fix: raw new[]/delete[] leaked both arrays whenever
        // THROW_IE_EXCEPTION fired before the deletes; std::vector cleans up
        // on every exit path (and matches DeformableConvShapeProp's style).
        std::vector<float> OD_temp(spacial_d_size);
        std::vector<size_t> KDims(spacial_d_size);
        size_t inputN = dims[0];
        for (size_t i = 0; i < spacial_d_size; i++) {
            // effective kernel extent; dilation==0 treated as "no dilation"
            if (convLayer._dilation[i])
                KDims[i] = (convLayer._kernel[i] - 1) * convLayer._dilation[i] + 1;
            else
                KDims[i] = convLayer._kernel[i];
        }
        size_t OC = convLayer._out_depth;
        std::string padType = convLayer._auto_pad;
        if (padType == "valid") {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - KDims[i] + 1.f) / convLayer._stride[i]);
        } else if (padType == "same_upper") {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / convLayer._stride[i]);
        } else if (padType == "same_lower") {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / convLayer._stride[i]);
        } else {
            // explicit padding: floor((in + pad_begin + pad_end - kernel) / stride) + 1
            for (size_t i = 0; i < spacial_d_size; i++) {
                OD_temp[i] =
                    std::floor(1.f *
                               (dims[dims_size - 1 - i] + convLayer._padding[i] + convLayer._pads_end[i] - KDims[i]) /
                               convLayer._stride[i]) +
                    1.f;
            }
        }
        for (size_t i = 0; i < spacial_d_size; i++)
            if (OD_temp[i] < 0)
                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
        SizeVector outShape = {inputN, OC};
        // spatial dims were computed innermost-first; emit them outermost-first
        for (int i = static_cast<int>(spacial_d_size) - 1; i >= 0; i--)
            outShape.push_back(static_cast<size_t>(OD_temp[i]));
        outShapes.push_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,51 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Crop layer
*/
class CropShapeProp : public BuiltInShapeInferImpl {
public:
    explicit CropShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Output starts as input #0's shape; the cropped axes are then overwritten
     * either from the reference input (two-input form), from the "dim" param,
     * or from offset/crop_end arithmetic.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams emptyParams {};
        CropLayer cropLayer(emptyParams);
        cropLayer.params = params;
        cropLayer.type = _type;
        validate(&cropLayer, inBlobs, params, blobs);
        SizeVector outShape = inShapes[0];
        if (inShapes.size() == 2) {
            // two-input form: cropped axes take the reference input's extents
            const SizeVector& reference = inShapes[1];
            for (int axis : cropLayer.axis) {
                outShape[axis] = reference[axis];
            }
        } else {
            const bool hasDim = cropLayer.params.count("dim") != 0;
            std::vector<int> cropEnd;
            if (!hasDim) cropEnd = cropLayer.GetParamAsInts("crop_end");
            for (size_t i = 0; i < cropLayer.axis.size(); i++) {
                const int axis = cropLayer.axis[i];
                if (hasDim) {
                    outShape[axis] = cropLayer.dim[i];
                } else {
                    // crop by offsets: remaining extent after trimming both ends
                    outShape[axis] = inShapes[0][axis] - cropLayer.offset[i] - cropEnd[i];
                }
            }
        }
        outShapes.push_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,38 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for CTCGreedyDecoder layer
*/
class CTCGreedyDecoderShapeProp : public BuiltInShapeInferImpl {
public:
    explicit CTCGreedyDecoderShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Output is 4D: the first two dims of input #0 swapped, padded with 1s
    /// (presumably (T, N, ...) -> (N, T, 1, 1) — confirm against the op spec).
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        outShapes.clear();
        LayerParams emptyParams {};
        CNNLayer layer(emptyParams);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);
        const auto& in = inShapes[0];
        SizeVector outShape = {in[1], in[0], 1, 1};
        outShapes.push_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,72 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Deconvolution layer
*/
class DeconvShapeProp : public BuiltInShapeInferImpl {
public:
    explicit DeconvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Infers the Deconvolution (transposed convolution) output shape
     * {N, out_depth, spatial...} for auto_pad "valid", "same_upper"/"same_lower"
     * and explicit pads (default branch).
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        DeconvolutionLayer deconvLayer(lp);
        deconvLayer.params = params;
        deconvLayer.type = _type;
        validate(&deconvLayer, inBlobs, params, blobs);
        auto dims = inShapes[0];
        auto dims_size = dims.size();
        auto spacial_d_size = dims.size() - 2;
        // RAII fix: raw new[]/delete[] leaked both arrays whenever
        // THROW_IE_EXCEPTION fired before the deletes; std::vector cleans up
        // on every exit path (and matches DeformableConvShapeProp's style).
        std::vector<float> OD_temp(spacial_d_size);
        std::vector<size_t> KDims(spacial_d_size);
        size_t inputN = dims[0];
        for (size_t i = 0; i < spacial_d_size; i++) {
            // effective kernel extent; dilation==0 treated as "no dilation"
            if (deconvLayer._dilation[i])
                KDims[i] = (deconvLayer._kernel[i] - 1) * deconvLayer._dilation[i] + 1;
            else
                KDims[i] = deconvLayer._kernel[i];
        }
        size_t OC = deconvLayer._out_depth;
        std::string padType = deconvLayer._auto_pad;
        if (padType == "valid") {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = (dims[dims_size - 1 - i] - 1) * deconvLayer._stride[i] + KDims[i];
        } else if ((padType == "same_upper") || (padType == "same_lower")) {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = dims[dims_size - 1 - i] * deconvLayer._stride[i];
        } else {
            // explicit padding: stride*(in-1) + kernel - pad_begin - pad_end
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = deconvLayer._stride[i] * (dims[dims_size - 1 - i] - 1) + KDims[i] -
                             deconvLayer._padding[i] - deconvLayer._pads_end[i];
        }
        for (size_t i = 0; i < spacial_d_size; i++)
            if (OD_temp[i] < 0)
                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
        SizeVector outShape = {inputN, OC};
        // spatial dims were computed innermost-first; emit them outermost-first
        for (int i = static_cast<int>(spacial_d_size) - 1; i >= 0; i--)
            outShape.push_back(static_cast<size_t>(OD_temp[i]));
        outShapes.emplace_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,77 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Deformable Convolution layer
*/
class DeformableConvShapeProp : public BuiltInShapeInferImpl {
public:
    explicit DeformableConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Infers the DeformableConvolution output shape {N, out_depth, spatial...}
     * using the same kernel/stride/dilation/padding arithmetic as a regular
     * convolution; the deformable offsets do not change the output extents.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        DeformableConvolutionLayer deformableConvLayer(lp);
        deformableConvLayer.params = params;
        deformableConvLayer.type = _type;
        validate(&deformableConvLayer, inBlobs, params, blobs);
        auto dims = inShapes[0];
        auto dims_size = dims.size();
        auto spacial_d_size = dims.size() - 2;
        std::vector<float> OD_temp(spacial_d_size);
        std::vector<size_t> KDims(spacial_d_size);
        size_t inputN = dims[0];
        for (int i = 0; i < spacial_d_size; i++) {
            // effective kernel extent; dilation==0 treated as "no dilation"
            if (deformableConvLayer._dilation[i])
                KDims[i] = (deformableConvLayer._kernel[i] - 1) * deformableConvLayer._dilation[i] + 1;
            else
                KDims[i] = deformableConvLayer._kernel[i];
        }
        size_t OC = deformableConvLayer._out_depth;
        std::string padType = deformableConvLayer._auto_pad;
        if (padType == "valid") {
            for (int i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - KDims[i] + 1.f) / deformableConvLayer._stride[i]);
        } else if (padType == "same_upper") {
            for (int i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / deformableConvLayer._stride[i]);
        } else if (padType == "same_lower") {
            for (int i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / deformableConvLayer._stride[i]);
        } else {
            // explicit padding: floor((in + pad_begin + pad_end - kernel) / stride) + 1
            for (int i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::floor(1.f *
                                        (dims[dims_size - 1 - i] + deformableConvLayer._padding[i] +
                                         deformableConvLayer._pads_end[i] - KDims[i]) /
                                        deformableConvLayer._stride[i]) +
                             1.f;
        }
        for (int i = 0; i < spacial_d_size; i++)
            if (OD_temp[i] < 0)
                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
        SizeVector outShape = {inputN, OC};
        // spatial dims were computed innermost-first; emit them outermost-first
        for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
        outShapes.emplace_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,42 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for DepthToSpace layer
*/
class DepthToSpaceShapeProp : public BuiltInShapeInferImpl {
public:
    explicit DepthToSpaceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// DepthToSpace moves data from the channel dim into the two innermost
    /// spatial dims: W *= b, H *= b, C /= b*b (b = block_size).
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams emptyParams {};
        DepthToSpaceLayer layer(emptyParams);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);
        const unsigned int blockSize = layer.block_size;
        SizeVector outShape = inShapes[0];
        const size_t rank = outShape.size();
        outShape[rank - 1] *= blockSize;                  // width
        outShape[rank - 2] *= blockSize;                  // height
        outShape[rank - 3] = outShape[rank - 3] / blockSize / blockSize;  // channels
        outShapes = {outShape};
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,41 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for DetectionOutput layer
 */
class DetectionOutputShapeProp : public BuiltInShapeInferImpl {
public:
    explicit DetectionOutputShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Output is {1, 1, N * keep_top_k, 7}: one 7-element record per kept detection.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer layer(lp);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);

        const int keepTopK = layer.GetParamAsInt("keep_top_k");
        // NOTE(review): a negative keep_top_k (some models use -1 for "keep all")
        // would wrap on the size_t cast — confirm upstream validation rejects it.
        const size_t maxDetections = static_cast<size_t>(keepTopK) * inShapes[0][0];
        outShapes.push_back({1, 1, maxDetections, 7});
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,52 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for EltWise layer
 */
class EltWiseShapeProp : public BuiltInShapeInferImpl {
public:
    explicit EltWiseShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Output shape is the per-dimension maximum of the first two input shapes
     * (left-aligned); dims present in only one input are taken as-is.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        EltwiseLayer eltwiseLayer(lp);
        eltwiseLayer.params = params;
        eltwiseLayer.type = _type;
        validate(&eltwiseLayer, inBlobs, params, blobs);
        if (inShapes.size() == 1) {
            outShapes.push_back(inShapes[0]);
        } else {
            // Fix: the output rank must be the max of the two input ranks. The previous
            // code used (std::max) on the vectors themselves, which compares them
            // lexicographically and could pick the *shorter* shape (e.g. {5} vs {2, 3}),
            // truncating the output rank.
            const SizeVector& higherRank = (inShapes[0].size() >= inShapes[1].size()) ? inShapes[0] : inShapes[1];
            SizeVector outShape(higherRank.size());
            for (size_t ind = 0; ind < outShape.size(); ++ind) {
                if (ind < inShapes[0].size() && ind < inShapes[1].size()) {
                    outShape[ind] = (std::max)(inShapes[0][ind], inShapes[1][ind]);
                } else if (ind >= inShapes[0].size()) {
                    outShape[ind] = inShapes[1][ind];
                } else {
                    outShape[ind] = inShapes[0][ind];
                }
            }
            outShapes.push_back(outShape);
        }
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,57 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference that just assign input shapes to output shapes
 */
class EqualShapeProp : public BuiltInShapeInferImpl {
public:
    explicit EqualShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Propagates every input shape to the corresponding output unchanged.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        outShapes.assign(inShapes.begin(), inShapes.end());
    }
};
/**
 * @brief Shape inference stub whose inference step is deliberately a no-op.
 *
 * Leaves outShapes untouched — presumably registered for layer types that need
 * no shape propagation; confirm at the registration site.
 */
class DoNothingShapeProp : public BuiltInShapeInferImpl {
public:
explicit DoNothingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
// Intentionally empty: no output shapes are produced or modified here.
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {}
};
/**
 * @brief Shape inference for Memory layers.
 *
 * Propagates input shapes only when the "index" parameter equals 1 —
 * presumably the "read" end of a Memory pair; confirm against the plugin's
 * Memory layer convention.
 */
class MemoryShapeProp : public BuiltInShapeInferImpl {
public:
    explicit MemoryShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        std::stringstream ss;
        ss.str(params.at("index"));
        // Fix: 'idx' was previously uninitialized; if the extraction failed
        // (non-numeric "index" value) the comparison below read an
        // indeterminate value (undefined behavior).
        int idx = 0;
        ss >> idx;
        if (idx == 1) {
            outShapes = inShapes;
        }
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Math layers
 */
class MathShapeProp : public BuiltInShapeInferImpl {
public:
    explicit MathShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Math layers are element-wise, so the output shape equals the first input shape.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        MathLayer layer(lp);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);
        outShapes.assign(1, inShapes[0]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,47 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Fill layer
 */
class FillShapeProp : public BuiltInShapeInferImpl {
public:
    explicit FillShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * The first input blob holds the target shape as an I32 vector; the output
     * shape is that vector read element by element.
     * @throws if the shape blob is unallocated or not I32.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        FillLayer fillLayer(lp);
        fillLayer.params = params;
        fillLayer.type = _type;
        validate(&fillLayer, inBlobs, params, blobs);
        // Fix: take the blob by const reference (the old code copied the shared_ptr),
        // drop the unused 'dims' local, and use a size_t loop counter to avoid the
        // signed/unsigned comparison with size().
        const auto& dimsBlob = inBlobs.front();
        const auto* buffer = dimsBlob->cbuffer().as<int32_t*>();
        if (!buffer || dimsBlob->getTensorDesc().getPrecision() != Precision::I32)
            THROW_IE_EXCEPTION << " Fill dimensions vector should be I32!";
        SizeVector shape;
        shape.reserve(dimsBlob->size());
        for (size_t i = 0; i < dimsBlob->size(); i++) {
            // NOTE(review): negative entries would wrap to huge size_t values —
            // presumably rejected by earlier validation; confirm.
            shape.push_back(buffer[i]);
        }
        outShapes = {shape};
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Flatten layer
 * (driven by ReshapeLayer parameters 'axis' and 'num_axes').
 */
class FlattenShapeProp : public BuiltInShapeInferImpl {
public:
explicit FlattenShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
// Collapses the dimension span [axis, num_axes] of the input into a single
// dimension; dims before 'axis' and after 'num_axes' are copied through.
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
ReshapeLayer reshapeLayer(lp);
reshapeLayer.params = params;
reshapeLayer.type = _type;
validate(&reshapeLayer, inBlobs, params, blobs);
auto inputShape = inShapes[0];
// Total element count; the flattened dimension is derived from it below.
size_t inputShapeTotal = std::accumulate(inputShape.begin(), inputShape.end(), 1lu, std::multiplies<size_t>());
SizeVector outShape;
int numAxes = reshapeLayer.num_axes;
int axis = reshapeLayer.axis;
// Product of the dims that are NOT flattened; divides the total below.
size_t notFlatten = 1;
if (numAxes == -1 && axis == 0) {
// Default case: flatten everything into a 1-D tensor.
outShape = {inputShapeTotal};
} else {
// Copy (and count) the leading dims that stay outside the flattened span.
if (axis > 0) {
for (int i = 0; i < axis; i++) {
notFlatten *= inputShape[i];
outShape.push_back(inputShape[i]);
}
}
// Placeholder for the flattened dimension, overwritten at outShape[axis] below.
outShape.push_back(1);
// Copy (and count) the trailing dims after 'num_axes'.
// NOTE(review): 'i < inputShape.size()' mixes signed and unsigned operands.
if (numAxes > 0) {
for (int i = numAxes + 1; i < inputShape.size(); i++) {
notFlatten *= inputShape[i];
outShape.push_back(inputShape[i]);
}
}
// NOTE(review): a negative 'axis' is not normalized here and would index out
// of range — presumably rejected by earlier validation; confirm.
outShape[axis] = inputShapeTotal / notFlatten;
}
outShapes.emplace_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,47 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Gather layer
 */
class GatherShapeProp : public BuiltInShapeInferImpl {
public:
    explicit GatherShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Result rank = data rank + indices rank - 1: the 'axis' dimension of the
     * data shape is replaced by the entire indices shape.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        GatherLayer gatherLayer(lp);
        gatherLayer.params = params;
        gatherLayer.type = _type;
        validate(&gatherLayer, inBlobs, params, blobs);

        const SizeVector& dataShape = inShapes[0];
        const SizeVector& indicesShape = inShapes[1];
        int axis = gatherLayer.axis;
        if (axis < 0) axis += dataShape.size();  // normalize negative axis

        SizeVector gathered;
        gathered.reserve(dataShape.size() + indicesShape.size() - 1);
        for (int i = 0; i < axis; ++i) gathered.push_back(dataShape[i]);
        gathered.insert(gathered.end(), indicesShape.begin(), indicesShape.end());
        for (size_t i = axis + 1; i < dataShape.size(); ++i) gathered.push_back(dataShape[i]);

        outShapes.resize(1);
        outShapes[0] = gathered;
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for GatherTree layer
 */
class GatherTreeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit GatherTreeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// The single output has exactly the shape of the first input.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        // NOTE(review): a GatherLayer is used purely as a parameter holder for
        // validate() — GatherTree has no dedicated layer class; confirm intentional.
        GatherLayer holder(lp);
        holder.params = params;
        holder.type = _type;
        validate(&holder, inBlobs, params, blobs);
        outShapes.resize(1, inShapes[0]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,61 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <cmath>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Gemm layer
 */
class GemmShapeProp : public BuiltInShapeInferImpl {
public:
    explicit GemmShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Output = batch dims (element-wise max over inputs, incl. optional C)
     * followed by M (rows of op(A)) and N (columns of op(B)).
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        // TODO: primitive does not support 5D tensor yet
        LayerParams lp {};
        GemmLayer gemmLayer(lp);
        gemmLayer.params = params;
        gemmLayer.type = _type;
        validate(&gemmLayer, inBlobs, params, blobs);

        auto dims0 = inShapes[0];
        auto dims1 = inShapes[1];

        SizeVector shapes;
        // Leading (batch) dims: element-wise maximum across A, B and optional C.
        // 'idx + 2 < size' also avoids the unsigned underflow of 'size() - 2' for rank < 2.
        for (size_t idx = 0; idx + 2 < dims0.size(); idx++) {
            size_t max_dim = dims0[idx] > dims1[idx] ? dims0[idx] : dims1[idx];
            if (inShapes.size() == 3) {
                auto dims2 = inShapes[2];
                max_dim = max_dim > dims2[idx] ? max_dim : dims2[idx];
            }
            shapes.push_back(max_dim);
        }

        // Fix: M must come from A using transpose_a, and N from B using transpose_b.
        // The previous code crossed the flags (indexed A with the B-derived axis and
        // vice versa), producing a wrong shape whenever transpose_a != transpose_b.
        size_t mAxis = gemmLayer.transpose_a ? dims0.size() - 1 : dims0.size() - 2;
        size_t nAxis = gemmLayer.transpose_b ? dims1.size() - 2 : dims1.size() - 1;
        shapes.push_back(dims0[mAxis]);
        shapes.push_back(dims1[nAxis]);

        outShapes.push_back(shapes);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 * @brief Shape inference for FullyConnected (InnerProduct) layers.
 */
class InnerProductShapeProp : public BuiltInShapeInferImpl {
public:
    explicit InnerProductShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Output is always 2-D: {batch, out_num}.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        FullyConnectedLayer fcLayer(lp);
        fcLayer.params = params;
        fcLayer.type = _type;
        validate(&fcLayer, inBlobs, params, blobs);
        const size_t batch = inShapes[0][0];
        const size_t outChannels = fcLayer._out_num;
        outShapes.push_back({batch, outChannels});
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,99 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Interp layer
 *
 * Two modes: (1) with a second input, the output spatial sizes are read
 * directly from its FP32 data; (2) otherwise the sizes come from the
 * height/width params or from factor/zoom_factor/shrink_factor scaling.
 * NOTE(review): the parameter path assumes a 4-D {N, C, H, W} input — indices
 * 0..3 are read unconditionally; confirm earlier validation enforces rank 4.
 */
class InterpShapeProp : public BuiltInShapeInferImpl {
public:
explicit InterpShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
SizeVector outShape;
if (inBlobs.size() == 2) {
// Mode 1: the output shape is the second input's FP32 contents, element by element.
auto* buffer = inBlobs[1]->cbuffer().as<float*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
outShape.push_back(static_cast<unsigned long>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
} else {
// Mode 2: derive H/W from scalar params. A zero value means "not set".
auto factor = cnnLayer.GetParamAsFloat("factor", 0);
auto shrink_factor = cnnLayer.GetParamAsFloat("shrink_factor", 0);
auto zoom_factor = cnnLayer.GetParamAsFloat("zoom_factor", 0);
auto height = static_cast<size_t>(cnnLayer.GetParamAsInt("height", 0));
auto width = static_cast<size_t>(cnnLayer.GetParamAsInt("width", 0));
// Floats are compared against epsilon rather than == 0.
auto IS_ZERO = [](float value) {
return std::fabs(value) < std::numeric_limits<float>::epsilon();
};
bool noFactor = IS_ZERO(zoom_factor) && IS_ZERO(shrink_factor) && IS_ZERO(factor);
size_t N, C, H, W;
N = inShapes[0][0];
C = inShapes[0][1];
H = inShapes[0][2];
W = inShapes[0][3];
// SETW/SETH: an explicit width/height param always wins over the computed value.
auto SETW = [&width, &W](size_t value) {
if (width) {
W = width;
} else {
W = value;
}
};
auto SETH = [&height, &H](size_t value) {
if (height) {
H = height;
} else {
H = value;
}
};
if (noFactor) {
// NOTE(review): with no factor AND no width/height these set W/H to 0 —
// presumably an invalid-IR case caught elsewhere; confirm.
SETW(width);
SETH(height);
} else {
// zoom_factor/shrink_factor override plain 'factor' when present.
float actualFactor = factor;
if (!IS_ZERO(shrink_factor) || !IS_ZERO(zoom_factor)) {
if (!IS_ZERO(zoom_factor)) actualFactor = zoom_factor;
if (!IS_ZERO(shrink_factor)) actualFactor /= shrink_factor;
}
// Note: the float product is truncated on conversion to size_t.
SETW(W * actualFactor);
SETH(H * actualFactor);
}
outShape = {N, C, H, W};
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for NonMaxSuppression layer
 */
class NMSShapeProp : public BuiltInShapeInferImpl {
public:
    explicit NMSShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Worst case: every entry of the second input is selected; each selection
     * is a triple — hence {prod(inShapes[1]), 3}. Presumably inShapes[1] is
     * the scores tensor {batches, classes, boxes}; confirm against the spec.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        NonMaxSuppressionLayer nmsLayer(lp);
        nmsLayer.params = params;
        nmsLayer.type = _type;
        validate(&nmsLayer, inBlobs, params, blobs);
        const SizeVector& scoresDims = inShapes[1];
        const size_t maxSelected = scoresDims[0] * scoresDims[1] * scoresDims[2];
        outShapes.push_back({maxSelected, 3});
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,46 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for the OneHot layer
 */
class OneHotShapeProp : public BuiltInShapeInferImpl {
public:
    explicit OneHotShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Output = input shape with a new dimension of size 'depth' inserted at
     * 'axis' (axis == -1 appends it after the last dimension).
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlob, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        OneHotLayer oneHotLayer(lp);
        oneHotLayer.params = params;
        oneHotLayer.type = _type;
        validate(&oneHotLayer, inBlob, params, blobs);

        const auto& srcShape = inShapes[0];
        auto insertAt = (oneHotLayer.axis == -1) ? srcShape.size() : oneHotLayer.axis;
        SizeVector result(srcShape.begin(), srcShape.end());
        result.insert(result.begin() + insertAt, oneHotLayer.depth);
        outShapes.push_back(result);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,40 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Pad layer
 */
class PadShapeProp : public BuiltInShapeInferImpl {
public:
    explicit PadShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Each output dim = input dim + leading pad + trailing pad.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        PadLayer padLayer(lp);
        padLayer.params = params;
        padLayer.type = _type;
        validate(&padLayer, inBlobs, params, blobs);
        SizeVector padded = inShapes[0];
        // NOTE(review): pads_begin/pads_end are indexed per input dim — presumably
        // validate() guarantees they match the input rank; confirm.
        for (size_t i = 0; i < padded.size(); i++) {
            padded[i] += padLayer.pads_begin[i] + padLayer.pads_end[i];
        }
        outShapes.push_back(padded);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,48 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Permute layer
 */
class PermuteShapeProp : public BuiltInShapeInferImpl {
public:
    explicit PermuteShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Output dim i is input dim order[i], as given by the "order" parameter.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer permuteLayer(lp);
        permuteLayer.params = params;
        permuteLayer.type = _type;
        validate(&permuteLayer, inBlobs, params, blobs);

        const std::vector<int> rawOrder = permuteLayer.GetParamAsInts("order");
        const SizeVector& src = inShapes[0];
        SizeVector permuted;
        permuted.reserve(src.size());
        for (size_t i = 0; i < src.size(); i++) {
            permuted.push_back(src[static_cast<size_t>(rawOrder[i])]);
        }
        outShapes.emplace_back(permuted);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,88 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Pooling layer
 */
class PoolingShapeProp : public BuiltInShapeInferImpl {
public:
    explicit PoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Computes the pooled output shape for every supported auto_pad mode
     * ("valid", "same_upper", "same_lower", or explicit pads with a
     * rounding-type of ceil/floor).
     * @throws if any computed output dimension is negative.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        PoolingLayer poolLayer(lp);
        poolLayer.params = params;
        poolLayer.type = _type;
        validate(&poolLayer, inBlobs, params, blobs);

        auto dims = inShapes[0];
        auto dims_size = dims.size();
        // Assumes layout {N, C, spatial...}; rank >= 2 — presumably guaranteed by validate(); TODO confirm.
        auto spacial_d_size = dims.size() - 2;
        // Fix: std::vector instead of raw new[]/delete[] — the old code leaked the
        // buffer whenever THROW_IE_EXCEPTION fired before the delete[] was reached.
        std::vector<float> OD_temp(spacial_d_size, 1.f);
        size_t inputN = dims[0];
        size_t IC = dims[1];
        std::string padType = poolLayer._auto_pad;
        if (padType == "valid") {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - poolLayer._kernel[i] + 1.f) / poolLayer._stride[i]);
        } else if (padType == "same_upper") {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / poolLayer._stride[i]);
        } else if (padType == "same_lower") {
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / poolLayer._stride[i]);
        } else {
            // Explicit padding: rounding mode comes from "rounding-type"/"rounding_type";
            // ceil is the historical default when the parameter is absent.
            auto it = std::find_if(poolLayer.params.begin(), poolLayer.params.end(),
                                   [](decltype(*poolLayer.params.begin())& lhs) {
                                       return lhs.first == "rounding-type" || lhs.first == "rounding_type";
                                   });
            bool isCeil = true;
            if (it != poolLayer.params.end()) {
                if (it->second == "floor") isCeil = false;
            }
            for (size_t i = 0; i < spacial_d_size; i++)
                OD_temp[i] +=
                    1.f *
                    (dims[dims_size - 1 - i] + poolLayer._padding[i] + poolLayer._pads_end[i] - poolLayer._kernel[i]) /
                    poolLayer._stride[i];
            if (isCeil) {
                for (size_t i = 0; i < spacial_d_size; i++) OD_temp[i] = std::ceil(OD_temp[i]);
            } else {
                for (size_t i = 0; i < spacial_d_size; i++) OD_temp[i] = std::floor(OD_temp[i]);
            }
            // A window that starts entirely in the padding contributes nothing — drop it.
            for (size_t i = 0; i < spacial_d_size; i++)
                if ((OD_temp[i] - 1) * poolLayer._stride[i] >= dims[dims_size - 1 - i] + poolLayer._padding[i])
                    --OD_temp[i];
        }
        for (size_t i = 0; i < spacial_d_size; i++)
            if (OD_temp[i] < 0)
                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
        // Spatial dims were computed innermost-first; reverse them back into {N, C, D...} order.
        SizeVector outShape = {inputN, IC};
        for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
        outShapes.emplace_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,42 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for PriorBoxClustered layer
 */
class PriorBoxClusteredShapeProp : public BuiltInShapeInferImpl {
public:
    explicit PriorBoxClusteredShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Output is {1, 2, widths * 4 * spatial size of the first input}.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer layer(lp);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);

        const std::vector<float> widths = layer.GetParamAsFloats("width", {});
        size_t total = widths.size() * 4;  // 4 coordinates per prior
        for (size_t i = 2; i < inShapes[0].size(); i++) total *= inShapes[0][i];
        outShapes.push_back({1, 2, total});
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,55 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for PriorBox layer
 */
class PriorBoxShapeProp : public BuiltInShapeInferImpl {
public:
    explicit PriorBoxShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Output is {1, 2, num_priors * 4 * spatial size of the first input}.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer layer(lp);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);

        const std::vector<float> minSizes = layer.GetParamAsFloats("min_size", {});
        const std::vector<float> maxSizes = layer.GetParamAsFloats("max_size", {});
        const std::vector<float> ratios = layer.GetParamAsFloats("aspect_ratio", {});
        const bool flip = static_cast<bool>(layer.GetParamAsInt("flip"));
        const bool scaleAllSizes = static_cast<bool>(layer.GetParamAsInt("scale_all_sizes", 1));

        // Flipping doubles the effective number of aspect ratios.
        const size_t ratioCount = (flip ? 2 : 1) * ratios.size();
        const size_t numPriors = scaleAllSizes
                                     ? (ratioCount + 1) * minSizes.size() + maxSizes.size()
                                     : ratioCount + minSizes.size() - 1;

        size_t total = numPriors * 4;  // 4 coordinates per prior
        for (size_t i = 2; i < inShapes[0].size(); i++) total *= inShapes[0][i];
        outShapes.push_back({1, 2, total});
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,41 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for PSRoiPooling layer
 */
class PSRoiPoolingShapeProp : public BuiltInShapeInferImpl {
public:
    explicit PSRoiPoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// One output ROI per entry of the second input, pooled to group_size x group_size.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer layer(lp);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);
        const auto outputDim = static_cast<size_t>(layer.GetParamAsInt("output_dim"));
        const auto groupSize = static_cast<size_t>(layer.GetParamAsInt("group_size"));
        outShapes.push_back({inShapes[1][0], outputDim, groupSize, groupSize});
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,40 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for quantize layer
 */
class QuantizeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit QuantizeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /// Quantization is element-wise: the output shape equals the data input shape.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        QuantizeLayer layer(lp);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);
        outShapes.emplace_back(inShapes[0]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,49 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Range layer
 */
class RangeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit RangeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Output length = floor(|limit - start| / |delta|), read from the three
     * scalar FP32 inputs (start, limit, delta).
     * @throws if an input is unallocated or delta is zero.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        RangeLayer rangeLayer(lp);
        rangeLayer.params = params;
        rangeLayer.type = _type;
        validate(&rangeLayer, inBlobs, params, blobs);
        const size_t RANGE_START = 0;
        const size_t RANGE_LIMIT = 1;
        const size_t RANGE_DELTA = 2;
        // Reads the scalar at the blob's blocking-desc offset; throws on unallocated data
        // (the old code dereferenced a possibly-null cbuffer pointer).
        auto readScalar = [&inBlobs](size_t idx) {
            const float* data = inBlobs[idx]->cbuffer().as<float*>();
            if (data == nullptr) THROW_IE_EXCEPTION << " Range input #" << idx << " must have allocated data";
            return data[inBlobs[idx]->getTensorDesc().getBlockingDesc().getOffsetPadding()];
        };
        float start = readScalar(RANGE_START);
        float limit = readScalar(RANGE_LIMIT);
        float delta = readScalar(RANGE_DELTA);
        // Fix: delta == 0 previously produced inf and an out-of-range
        // float -> size_t conversion (undefined behavior).
        if (delta == 0.f) THROW_IE_EXCEPTION << " Range 'delta' input must be non-zero";
        size_t work_amount_dst = static_cast<size_t>(std::floor(std::abs((limit - start) / delta)));
        outShapes = {{work_amount_dst}};
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,75 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for Reduce layer
 */
class ReduceShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ReduceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * Output = data shape with every reduced axis either removed
     * (keep_dims == false) or replaced by 1 (keep_dims == true). The axes come
     * from the second input blob (I32, at most 1-D).
     * @throws on missing inputs, wrong axes rank/precision, or out-of-range axes.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ReduceLayer reduceLayer(lp);
        reduceLayer.params = params;
        reduceLayer.type = _type;
        validate(&reduceLayer, inBlobs, params, blobs);

        const size_t REDUCE_DATA = 0;
        const size_t REDUCE_INDEXES = 1;
        if (inBlobs.size() < 2) THROW_IE_EXCEPTION << " Incorrect number of inputs";

        SizeVector idx_dims = inBlobs[REDUCE_INDEXES]->getTensorDesc().getDims();
        if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1 dimension";
        if (inBlobs[REDUCE_INDEXES]->getTensorDesc().getPrecision() != Precision::I32)
            THROW_IE_EXCEPTION << " Incorrect 'axes_to_reduction' input precision. Only I32 is supported!";

        SizeVector data_dims = inBlobs[REDUCE_DATA]->getTensorDesc().getDims();
        int32_t* idx_data = inBlobs[REDUCE_INDEXES]->cbuffer().as<int32_t*>() +
                            inBlobs[REDUCE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
        SizeVector axes;
        for (size_t i = 0; i < idx_dims[0]; i++) {
            int32_t axis = idx_data[i];
            if (axis < 0) axis += data_dims.size();  // normalize negative axes
            // Fix: the old check used '>', letting axis == rank slip through and be
            // silently ignored; it also missed axes still negative after normalization.
            if (axis < 0 || static_cast<size_t>(axis) >= data_dims.size())
                THROW_IE_EXCEPTION << " Index to reduce exceeds data tensor dimension";
            axes.push_back(static_cast<size_t>(axis));
        }

        bool keep_dims = reduceLayer.keep_dims;
        SizeVector outShape;
        for (size_t i = 0; i < data_dims.size(); i++) {
            bool reduced = std::find(axes.begin(), axes.end(), i) != axes.end();
            if (reduced) {
                if (keep_dims) outShape.push_back(1);
            } else {
                outShape.push_back(data_dims[i]);
            }
        }
        outShapes.push_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for RegionYolo layer
*/
class RegionYoloShapeProp : public BuiltInShapeInferImpl {
public:
    explicit RegionYoloShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Infers the RegionYolo output shape. With do_softmax the dimensions in
    // [axis, end_axis] are flattened into a single one; otherwise the output is
    // a 4-D tensor whose channel count is derived from classes/coords/mask.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer layer(lp);
        layer.params = params;
        // Attributes driving the output shape (defaults mirror the IR defaults).
        const int classes = layer.GetParamAsInt("classes", 1);
        const int coords = layer.GetParamAsInt("coords", 1);
        const int num = layer.GetParamAsInt("num", 1);
        (void)num;  // parsed for validation side effects only
        const bool do_softmax = static_cast<bool>(layer.GetParamAsInt("do_softmax", 1));
        const std::vector<int> mask = layer.GetParamAsInts("mask", {});
        const unsigned int axis = layer.GetParamAsUInt("axis", 1);
        int end_axis = layer.GetParamAsInt("end_axis", 1);
        if (end_axis < 0) end_axis += inShapes[0].size();
        SizeVector outShape;
        if (do_softmax) {
            // Keep dims before 'axis', collapse [axis, end_axis] into one, keep the rest.
            size_t flat_dim = 1;
            for (size_t i = 0; i < axis; i++) outShape.push_back(inShapes[0][i]);
            for (size_t i = axis; i < end_axis + 1; i++) flat_dim *= inShapes[0][i];
            outShape.push_back(flat_dim);
            for (size_t i = end_axis + 1; i < inShapes[0].size(); i++) outShape.push_back(inShapes[0][i]);
        } else {
            outShape = {inShapes[0][0], (classes + coords + 1) * mask.size(), inShapes[0][2], inShapes[0][3]};
        }
        outShapes.push_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,49 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ReorgYolo layer
*/
class ReorgYoloShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ReorgYoloShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // ReorgYolo moves spatial blocks into channels: the channel dimension (index 1)
    // grows by stride^2 while every later (spatial) dimension shrinks by stride.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer cnnLayer(lp);
        cnnLayer.params = params;
        cnnLayer.type = _type;
        validate(&cnnLayer, inBlobs, params, blobs);
        const size_t stride = static_cast<size_t>(cnnLayer.GetParamAsInt("stride"));
        const SizeVector& in = inShapes[0];
        SizeVector outShape;
        for (size_t i = 0; i < in.size(); i++) {
            size_t dim = in[i];
            if (i == 1) {
                dim *= stride * stride;  // channels
            } else if (i > 1) {
                dim /= stride;           // spatial dims (integer division, as before)
            }
            outShape.push_back(dim);
        }
        outShapes.push_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,75 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Resample layer
*/
class ResampleShapeProp : public BuiltInShapeInferImpl {
public:
explicit ResampleShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
LayerParams lp {};
CNNLayer cnnLayer(lp);
cnnLayer.params = params;
cnnLayer.type = _type;
validate(&cnnLayer, inBlobs, params, blobs);
SizeVector outShape;
if (inBlobs.size() == 2) {
switch (inBlobs[1]->getTensorDesc().getPrecision()) {
case Precision::FP32: {
auto* buffer = inBlobs[1]->cbuffer().as<float*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
outShape.push_back(static_cast<unsigned long>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
break;
}
case Precision::I32: {
auto* buffer = inBlobs[1]->cbuffer().as<int32_t*>();
if (buffer != nullptr) {
for (int i = 0; i < inBlobs[1]->size(); i++) {
outShape.push_back(static_cast<unsigned long>(buffer[i]));
}
} else {
THROW_IE_EXCEPTION << "Second input must have allocated data";
}
break;
}
default:
THROW_IE_EXCEPTION << "Unsupported second input precision";
}
} else {
auto scale = cnnLayer.GetParamAsFloat("factor");
outShape = {inShapes[0][0], inShapes[0][1]};
for (int i = 2; i < inShapes[0].size(); i++)
outShape.push_back(static_cast<size_t>(std::ceil(inShapes[0][i] * scale)));
}
outShapes.push_back(outShape);
}
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,120 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <functional>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
#include "precision_utils.h"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Reshape layer
*/
class ReshapeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ReshapeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Infers the output shape of a Reshape layer.
    //
    // The target shape ("mask") comes from the second input blob when two inputs
    // are given (converted element-wise to int from FP32/I32/I64/U64/FP16),
    // otherwise from the layer's parsed "dim" attribute. Mask semantics:
    //   0  -> copy the corresponding input dimension,
    //  -1  -> infer this dimension so the total element count is preserved,
    //  n>0 -> use n literally.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ReshapeLayer reshapeLayer(lp);
        reshapeLayer.params = params;
        reshapeLayer.type = _type;
        validate(&reshapeLayer, inBlobs, params, blobs);
        SizeVector outShape;
        std::vector<int> reshapeMask;
        if (inBlobs.size() == 2) {
            // Read the target shape from input 1, branching on its precision.
            if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP32) {
                auto* buffer = inBlobs[1]->cbuffer().as<float*>();
                if (buffer != nullptr) {
                    for (int i = 0; i < inBlobs[1]->size(); i++) {
                        reshapeMask.push_back(static_cast<int>(buffer[i]));
                    }
                } else {
                    THROW_IE_EXCEPTION << "Second input must have allocated data";
                }
            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
                auto* buffer = inBlobs[1]->cbuffer().as<int*>();
                if (buffer != nullptr) {
                    reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
                } else {
                    THROW_IE_EXCEPTION << "Second input must have allocated data";
                }
            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I64) {
                // Note: 64-bit values are narrowed to int by vector<int>::assign.
                auto* buffer = inBlobs[1]->cbuffer().as<int64_t*>();
                if (buffer != nullptr) {
                    reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
                } else {
                    THROW_IE_EXCEPTION << "Second input must have allocated data";
                }
            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::U64) {
                auto* buffer = inBlobs[1]->cbuffer().as<uint64_t*>();
                if (buffer != nullptr) {
                    reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
                } else {
                    THROW_IE_EXCEPTION << "Second input must have allocated data";
                }
            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP16) {
                // FP16 values must be expanded to FP32 before the int conversion.
                auto* buffer = inBlobs[1]->cbuffer().as<uint16_t*>();
                if (buffer != nullptr) {
                    for (int i = 0; i < inBlobs[1]->size(); i++) {
                        reshapeMask.push_back(static_cast<int>(PrecisionUtils::f16tof32(buffer[i])));
                    }
                } else {
                    THROW_IE_EXCEPTION << "Second input must have allocated data";
                }
            } else {
                THROW_IE_EXCEPTION << "Second input has unsupported precision";
            }
        } else {
            // Single-input form: mask was parsed from the "dim" attribute.
            reshapeMask = reshapeLayer.shape;
        }
        auto inputShape = inShapes[0];
        size_t inputShapeTotal = std::accumulate(inputShape.begin(), inputShape.end(), 1lu, std::multiplies<size_t>());
        if (reshapeMask.empty()) {
            // An empty mask flattens the input into a 1-D tensor.
            outShape = {inputShapeTotal};
        } else {
            // First pass: product of all dimensions that are known without inference
            // (0 copies the input dim at the same index, positive values are literal).
            size_t res = 1;
            for (int i = 0; i < reshapeMask.size(); i++) {
                if (reshapeMask[i] == 0) {
                    res *= inputShape[i];
                } else if (reshapeMask[i] != -1) {
                    res *= reshapeMask[i];
                }
            }
            // NOTE(review): if res == 0 (e.g. a zero-sized input dim copied via mask 0)
            // the division below is undefined behavior — confirm upstream validation
            // rules out zero dims. Also assumes mask index i < input rank when mask[i] == 0.
            size_t newDim = inputShapeTotal / res;
            // Second pass: materialize the output shape, substituting the inferred dim for -1.
            for (int i = 0; i < reshapeMask.size(); i++) {
                if (reshapeMask[i] == 0) {
                    outShape.push_back(inputShape[i]);
                } else if (reshapeMask[i] == -1) {
                    outShape.push_back(newDim);
                } else {
                    outShape.push_back(reshapeMask[i]);
                }
            }
            // Sanity check: the reshape must preserve the total element count.
            size_t outputShapeTotal = std::accumulate(outShape.begin(), outShape.end(), 1lu, std::multiplies<size_t>());
            if (inputShapeTotal != outputShapeTotal) {
                THROW_IE_EXCEPTION << "Invalid reshape mask (dim attribute): number of elements in input: "
                                   << details::dumpVec(inputShape) << " and output: " << details::dumpVec(outShape)
                                   << " mismatch";
            }
        }
        outShapes.emplace_back(outShape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ReverseSequence layer
*/
class ReverseSequenceShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ReverseSequenceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // ReverseSequence reorders elements only, so the output shape is exactly
    // the shape of the data input.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ReverseSequenceLayer reverseLayer(lp);
        reverseLayer.params = params;
        reverseLayer.type = _type;
        validate(&reverseLayer, inBlobs, params, blobs);
        outShapes.clear();
        outShapes.push_back(inShapes[0]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,50 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for RNN sequence layers
 */
class RNNShapeProp : public BuiltInShapeInferImpl {
public:
    explicit RNNShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Output 0 mirrors the data input with its last (feature) dimension replaced
    // by hidden_size; the following outputs mirror the incoming state inputs.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        RNNSequenceLayer rnn(lp);
        rnn.params = params;
        rnn.type = _type;
        IE_SUPPRESS_DEPRECATED_START
        rnn.precision = Precision::FP32;  // FIXME: No ability to discover current precision. Assume fp32
        IE_SUPPRESS_DEPRECATED_END
        validate(&rnn, inBlobs, params, blobs);
        const int state_size = rnn.hidden_size;
        // LSTM carries two state tensors (hidden + cell); other cell types carry one.
        const int num_states = rnn.cellType == RNNCellBase::LSTM ? 2 : 1;
        SizeVector data_shape = inShapes[0];
        data_shape[2] = static_cast<size_t>(state_size);
        outShapes.push_back(data_shape);
        for (int i = 1; i <= num_states; i++) {
            outShapes.push_back(inShapes[i]);
        }
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,47 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for RoiPooling layer
*/
class RoiPoolingShapeProp : public BuiltInShapeInferImpl {
public:
    explicit RoiPoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Output is [num_rois, channels, (pooled_d,) pooled_h, pooled_w]; the pooled_*
    // attributes are appended only when present (>= 0), matching the IR format
    // pooled="...,d,h,w".
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer cnnLayer(lp);
        cnnLayer.params = params;
        cnnLayer.type = _type;
        validate(&cnnLayer, inBlobs, params, blobs);
        SizeVector result;
        result.push_back(inShapes[1][0]);  // number of ROIs (input 1, dim 0)
        result.push_back(inShapes[0][1]);  // feature-map channels (input 0, dim 1)
        for (auto attr : {"pooled_d", "pooled_h", "pooled_w"}) {
            const int pooled = cnnLayer.GetParamAsInt(attr, -1);
            if (pooled >= 0) {
                result.push_back(static_cast<size_t>(pooled));
            }
        }
        outShapes.push_back(result);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,56 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ScatterUpdate layer
*/
class ScatterUpdateShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ScatterUpdateShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // ScatterUpdate writes updates into a copy of the reference input, so the
    // output shape equals the shape of input 0.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ScatterUpdateLayer scatterLayer(lp);
        scatterLayer.params = params;
        scatterLayer.type = _type;
        validate(&scatterLayer, inBlobs, params, blobs);
        outShapes.clear();
        outShapes.push_back(inShapes[0]);
    }
};
/**
*@brief Implementation of Shape inference for ScatterElementsUpdate layer
*/
class ScatterElementsUpdateShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ScatterElementsUpdateShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // ScatterElementsUpdate modifies elements in place, so the output shape
    // equals the shape of input 0.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ScatterElementsUpdateLayer scatterLayer(lp);
        scatterLayer.params = params;
        scatterLayer.type = _type;
        validate(&scatterLayer, inBlobs, params, blobs);
        outShapes.clear();
        outShapes.push_back(inShapes[0]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,36 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Select layer
*/
class SelectShapeProp : public BuiltInShapeInferImpl {
public:
    explicit SelectShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // The Select output takes its shape from input 1 (the 'then' branch);
    // input 0 is the boolean condition.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        SelectLayer selLayer(lp);
        selLayer.params = params;
        selLayer.type = _type;
        validate(&selLayer, inBlobs, params, blobs);
        outShapes.push_back(inShapes[1]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,33 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Shape layer
*/
class ShapeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ShapeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // The Shape layer emits a 1-D tensor whose single dimension equals the
    // rank of the input tensor.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        const size_t rank = inShapes[0].size();
        outShapes.push_back(SizeVector {rank});
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ShuffleChannels layer
*/
class ShuffleChannelsShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ShuffleChannelsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Shuffling channels is a pure permutation of elements, so the output
    // shape equals the input shape.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        ShuffleChannelsLayer shuffleLayer(lp);
        shuffleLayer.params = params;
        shuffleLayer.type = _type;
        validate(&shuffleLayer, inBlobs, params, blobs);
        outShapes.clear();
        outShapes.push_back(inShapes[0]);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,42 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SpaceToDepth layer
*/
class SpaceToDepthShapeProp : public BuiltInShapeInferImpl {
public:
    explicit SpaceToDepthShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // SpaceToDepth moves spatial blocks into depth: the two innermost dims
    // shrink by block_size while the third-from-last grows by block_size^2.
    // Assumes the input has at least 3 dimensions — presumably guaranteed by
    // validate(); confirm against the layer validator.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        SpaceToDepthLayer s2dLayer(lp);
        s2dLayer.params = params;
        s2dLayer.type = _type;
        validate(&s2dLayer, inBlobs, params, blobs);
        const unsigned int block_size = s2dLayer.block_size;
        SizeVector result = inShapes[0];
        const size_t rank = result.size();
        result[rank - 1] = inShapes[0][rank - 1] / block_size;
        result[rank - 2] = inShapes[0][rank - 2] / block_size;
        result[rank - 3] = inShapes[0][rank - 3] * block_size * block_size;
        outShapes.clear();
        outShapes.push_back(result);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,31 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SparseFillEmptyRows layer
*/
class SparseFillEmptyRowsShapeProp : public BuiltInShapeInferImpl {
public:
    explicit SparseFillEmptyRowsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Shape inference is deliberately unsupported for SparseFillEmptyRows;
    // this implementation unconditionally throws.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        THROW_IE_EXCEPTION << "SparseFillEmptyRows is not re-shapeable layer.";
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,40 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for SparseSegmentReduce layer
*/
class SparseSegmentReduceShapeProp : public BuiltInShapeInferImpl {
public:
    explicit SparseSegmentReduceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // The reduced output keeps the data-input shape except that its first
    // dimension equals the number of indices (dim 0 of input 1).
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        SparseSegmentReduceLayer reduceLayer(lp);
        reduceLayer.params = params;
        reduceLayer.type = _type;
        validate(&reduceLayer, inBlobs, params, blobs);
        SizeVector result = inShapes[0];
        result[0] = inShapes[1][0];
        outShapes.clear();
        outShapes.push_back(result);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,54 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_built_in_impl.hpp"
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for ExperimentalSparseWeightedReduce layer
*/
class ExperimentalSparseWeightedReduceShapeProp : public BuiltInShapeInferImpl {
public:
    explicit ExperimentalSparseWeightedReduceShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}

    /**
     * @brief Infers the output shape of an ExperimentalSparseWeightedReduce layer.
     *
     * The single output takes the shape of input 3, with its first dimension
     * replaced by the scalar value stored in input 2 (which must be I32 and
     * have allocated data).
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
                         const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs,
                         std::vector<SizeVector>& outShapes) override {
        LayerParams lp{};
        ExperimentalSparseWeightedReduceLayer sparse_weighted_reduce_layer(lp);
        sparse_weighted_reduce_layer.params = params;
        sparse_weighted_reduce_layer.type = _type;
        validate(&sparse_weighted_reduce_layer, inBlobs, params, blobs);
        // compute a number of outputs
        size_t num_outputs = 1;
        // reshape available outputs
        outShapes.resize(num_outputs);
        outShapes[0] = inShapes[3];
        if (inBlobs[2]->getTensorDesc().getPrecision() == Precision::I32) {
            auto* buffer = inBlobs[2]->cbuffer().as<int*>();
            if (buffer != nullptr) {
                outShapes[0][0] = static_cast<size_t>(buffer[0]);
            } else {
                THROW_IE_EXCEPTION << "The third input must have allocated data";
            }
        } else {
            // Fixed message: was "The third must have I32 precision" (missing "input").
            THROW_IE_EXCEPTION << "The third input must have I32 precision";
        }
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,50 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Split layer
*/
class SplitShapeProp : public BuiltInShapeInferImpl {
public:
    explicit SplitShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Produces one output per entry of out_sizes: each output mirrors the input
    // shape with the split-axis dimension replaced by the corresponding size.
    // The sizes must sum exactly to the input's dimension along the split axis.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        SplitLayer splitLayer(lp);
        splitLayer.params = params;
        splitLayer.type = _type;
        validate(&splitLayer, inBlobs, params, blobs);
        const std::vector<int> out_sizes = splitLayer.GetParamAsInts("out_sizes", {});
        if (out_sizes.empty()) THROW_IE_EXCEPTION << "Value of out_sizes attribute is empty";
        size_t total = 0;
        for (const auto& part : out_sizes) total += part;
        if (total != inShapes[0][splitLayer._axis])
            THROW_IE_EXCEPTION << "The sum of the dimensions on the axis(" << splitLayer._axis
                               << ") is not equal out_sizes: " << details::dumpVec(out_sizes);
        for (const auto& part : out_sizes) {
            SizeVector shape = inShapes[0];
            shape[splitLayer._axis] = static_cast<size_t>(part);
            outShapes.push_back(shape);
        }
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,122 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Squeeze layer
*/
class SqueezeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit SqueezeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    /**
     * @brief Infers the output shape of a Squeeze layer.
     *
     * Input 0 is the data tensor; input 1 is a 1-D tensor of axes to remove
     * (negative values count from the end; each named axis must have size 1).
     * Index precision may be FP32, FP16, I32, I64 or U64.
     */
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer layer(lp);
        layer.params = params;
        layer.type = _type;
        validate(&layer, inBlobs, params, blobs);
        const size_t SQUEEZE_DATA = 0;
        const size_t SQUEEZE_INDEXES = 1;
        SizeVector data_dims;
        SizeVector idx_dims;
        idx_dims = inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getDims();
        if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1 dimension";
        data_dims = inBlobs[SQUEEZE_DATA]->getTensorDesc().getDims();
        if (data_dims.size() <= idx_dims[0] && !(data_dims.size() == 1 && idx_dims[0] == 1))
            THROW_IE_EXCEPTION << " Incompatible number of data dimensions and indexes vector length!";
        SizeVector outShape;
        // Dispatch on the index precision; each instantiation normalizes the axes
        // and builds the squeezed shape.
        switch (inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getPrecision()) {
        case Precision::FP32: {
            procIndices<float>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        case Precision::FP16: {
            procIndices<ie_fp16>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        case Precision::I32: {
            procIndices<int32_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        case Precision::I64: {
            procIndices<int64_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        case Precision::U64: {
            procIndices<uint64_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        default:
            THROW_IE_EXCEPTION
                << "Incorrect 'indices_to_squeeze' input precision. Only FP32, FP16, I32, I64 and U64 are supported!";
        }
        outShapes.push_back(outShape);
    }

private:
    // Validates the squeeze axes (after negative-axis normalization) and copies
    // every dimension that is NOT squeezed into outShape.
    // NOTE(review): relies on a transitive include for ie_fp16/PrecisionUtils —
    // this header does not include precision_utils.h directly; confirm.
    template <typename T>
    void procIndices(const std::vector<Blob::CPtr>& inBlobs, const size_t SQUEEZE_INDEXES, SizeVector& data_dims,
                     SizeVector& outShape, const SizeVector& idx_dims) {
        T* idx_data = inBlobs[SQUEEZE_INDEXES]->cbuffer().as<T*>() +
                      inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
        for (size_t i = 0; i < idx_dims[0]; i++) {
            auto axis = castToInt32(idx_data[i]);
            if (axis < 0) axis += data_dims.size();
            // Valid axes are [0, rank). The previous '>' comparison let axis == rank
            // through, causing an out-of-bounds data_dims[axis] read below; '>=' (on
            // the unsigned value, which also catches still-negative axes) fixes that.
            if (static_cast<size_t>(axis) >= data_dims.size()) {
                THROW_IE_EXCEPTION << "Index to squeeze exceeds data tensor dimension";
            } else if (data_dims[axis] != 1) {
                THROW_IE_EXCEPTION << "Index to squeeze of data tensor dimension is not 1";
            }
        }
        for (size_t j = 0; j < data_dims.size(); j++) {
            bool found = false;
            for (size_t i = 0; i < inBlobs[SQUEEZE_INDEXES]->size(); i++) {
                auto axis = castToInt32(idx_data[i]);
                if (axis < 0) axis += data_dims.size();
                if (j == static_cast<size_t>(axis)) found = true;
            }
            if (!found) outShape.push_back(data_dims[j]);
        }
    }

    // Overload set converting every supported index precision to int32.
    int32_t castToInt32(ie_fp16 x) {
        return static_cast<int32_t>(InferenceEngine::PrecisionUtils::f16tof32(x));
    }
    int32_t castToInt32(uint64_t x) {
        return static_cast<int32_t>(x);
    }
    int32_t castToInt32(int64_t x) {
        return static_cast<int32_t>(x);
    }
    int32_t castToInt32(int32_t x) {
        return x;
    }
    int32_t castToInt32(float x) {
        return static_cast<int32_t>(x);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,34 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <shape_infer/const_infer/ie_strided_slice_const_infer.hpp>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for StridedSlice layer
*/
class StridedSliceShapeProp : public BuiltInShapeInferImpl {
public:
    explicit StridedSliceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // All slice-bounds arithmetic is delegated to the shared StridedSliceHelper;
    // this implementation only forwards its computed shape.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        StridedSliceHelper sliceHelper(inBlobs, params);
        outShapes.push_back(sliceHelper.getOutputShape());
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,104 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <shape_infer/ie_reshaper.hpp>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 *@brief Implementation of Shape inference for TensorIterator layer
 */
class TensorIteratorShapeProp : public BuiltInShapeInferImpl {
public:
    explicit TensorIteratorShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Stores the original TensorIterator layer so its body network and port maps
    // can be copied into the temporary layer used for inference below.
    void setOriginalLayer(const CNNLayer* layer) {
        auto ti = dynamic_cast<const TensorIterator*>(layer);
        if (!ti) THROW_IE_EXCEPTION << "Error during shape infer. Original layer is not TensorIterator.";
        _original_ti = ti;
    }

    // Infers output shapes in three steps: (1) derive the per-iteration input
    // shapes for the body from the external input shapes and input_port_map,
    // (2) run the body's own shape inference without applying it, (3) map the
    // body's result shapes back through output_port_map, scaling the iterated
    // axis by the number of iterations.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        TensorIterator ti(lp);
        ti.params = params;
        ti.type = _type;
        ti.body = _original_ti->body;
        ti.back_edges = _original_ti->back_edges;
        ti.input_port_map = _original_ti->input_port_map;
        ti.output_port_map = _original_ti->output_port_map;
        validate(&ti, inBlobs, params, blobs);
        // TODO: make util function to calculate num of iteration
        int num_iteration = 1;
        // Prepare input shapes for internal body
        std::map<std::string, std::vector<size_t>> newInShapes;
        for (auto& port_map : ti.input_port_map) {
            int ext_port = port_map.from;
            int int_port = port_map.to;
            auto int_name = ti.body.inputs[int_port]->getName();
            auto shape = inShapes[ext_port];
            if (port_map.axis != -1) {
                // Negative start/end indices count from the end of the iterated axis.
                int size = shape[port_map.axis];
                int start = port_map.start < 0 ? port_map.start + size + 1 : port_map.start;
                int end = port_map.end < 0 ? port_map.end + size + 1 : port_map.end;
                // NOTE(review): assumes port_map.stride != 0; the division below would
                // otherwise be undefined — confirm this is validated upstream. Also note
                // num_iteration keeps the value from the LAST iterating input port.
                num_iteration = std::abs(end - start) / std::abs(port_map.stride);
                // port with iterating through. Change dimension with iteration
                shape[port_map.axis] = port_map.part_size;
            }
            newInShapes[int_name] = shape;
        }
        // Body shape infer
        _body_reshaper = std::make_shared<Reshaper>(_original_ti->body.inputs);
        _body_reshaper->runNoApply(newInShapes);
        outShapes.resize(ti.output_port_map.size());
        for (auto& port_map : ti.output_port_map) {
            int ext_port = port_map.from;
            int int_port = port_map.to;
            auto& int_out_data = ti.body.outputs[int_port];
            auto shape = _body_reshaper->getResultShapeFor(int_out_data);
            if (port_map.axis != -1) {
                // port with iterating through. Change dimension with iteration
                shape[port_map.axis] *= num_iteration;
            }
            outShapes[ext_port] = shape;
        }
    }

    // Commits the deferred body reshape computed by inferShapesImpl; must be
    // called only after a successful inference pass.
    void apply() {
        if (!_body_reshaper)
            THROW_IE_EXCEPTION << "Request of apply reshape results while shape infer was not finished";
        _body_reshaper->apply();
        _body_reshaper.reset();  // WA: reset _body_reshaper to release ownership for input data
    }

private:
    const TensorIterator* _original_ti = nullptr;  // original layer; source of body/port maps
    std::shared_ptr<Reshaper> _body_reshaper;      // holds pending body reshape between infer and apply
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
*@brief Implementation of Shape inference for Tile layer
*/
class TileShapeProp : public BuiltInShapeInferImpl {
public:
    explicit TileShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    // Tiling repeats the input 'tiles' times along 'axis', so the output shape
    // is the input shape with that one dimension multiplied.
    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        TileLayer tileLayer(lp);
        tileLayer.params = params;
        tileLayer.type = _type;
        validate(&tileLayer, inBlobs, params, blobs);
        SizeVector shape = inShapes[0];
        shape[tileLayer.axis] = shape[tileLayer.axis] * tileLayer.tiles;
        outShapes.push_back(shape);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,67 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 * @brief Shape inference implementation for the TopK layer.
 *
 * Produces two outputs (values and indices), both shaped like the data
 * input with the requested axis resized to K, where K is read from the
 * second (constant, I32) input blob.
 */
class TopKShapeProp : public BuiltInShapeInferImpl {
public:
    explicit TopKShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        TopKLayer topKLayer(lp);
        topKLayer.params = params;
        topKLayer.type = _type;
        validate(&topKLayer, inBlobs, params, blobs);

        const size_t TOPK_DATA = 0;  // input tensor
        const size_t TOPK_K = 1;     // constant holding K

        if (inBlobs[TOPK_DATA]->getTensorDesc().getPrecision() != Precision::FP32)
            THROW_IE_EXCEPTION << " Incorrect input data tensor precision. Only FP32 is supported!";
        if (inBlobs[TOPK_K]->getTensorDesc().getPrecision() != Precision::I32)
            THROW_IE_EXCEPTION << " Incorrect input index value precision. Only I32 is supported!";
        if (inBlobs[TOPK_K]->getTensorDesc().getDims().size() > 1)
            THROW_IE_EXCEPTION << " Index vector should be 1 dimension";

        SizeVector src_dims = inBlobs[TOPK_DATA]->getTensorDesc().getDims();
        // Normalize a negative axis to a non-negative one.
        int axis_ = topKLayer.axis;
        if (axis_ < 0) axis_ += src_dims.size();
        size_t axis = static_cast<size_t>(axis_);
        if (src_dims.size() < (1 + axis))
            THROW_IE_EXCEPTION << " Incorrect input parameters dimensions and axis number!";

        // K must come from a constant blob; honor any padding offset.
        int* src_k = inBlobs[TOPK_K]->cbuffer().as<int*>();
        if (src_k == nullptr) THROW_IE_EXCEPTION << " Only const input for 'k' is supported!";
        src_k += inBlobs[TOPK_K]->getTensorDesc().getBlockingDesc().getOffsetPadding();

        // Build the result shape once and append it for both outputs
        // (values and indices). Appending the finished shape avoids the
        // previous assumption that outShapes was empty on entry (the old
        // code patched outShapes[0]/outShapes[1] after push_back).
        SizeVector out_dims = inShapes[0];
        out_dims[axis] = static_cast<size_t>(src_k[0]);
        outShapes.push_back(out_dims);
        outShapes.push_back(out_dims);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,109 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 * @brief Shape inference implementation for the Unsqueeze layer.
 *
 * Inserts dimensions of size 1 into the data shape at the positions given
 * by the second ("indices_to_set") input, which may be FP32, FP16 or I32.
 */
class UnsqueezeShapeProp : public BuiltInShapeInferImpl {
public:
    explicit UnsqueezeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        LayerParams lp {};
        CNNLayer unsqueezeLayer(lp);
        unsqueezeLayer.params = params;
        unsqueezeLayer.type = _type;
        validate(&unsqueezeLayer, inBlobs, params, blobs);

        const size_t UNSQUEEZE_DATA = 0;     // tensor to be expanded
        const size_t UNSQUEEZE_INDEXES = 1;  // 1D list of axes to insert

        SizeVector idx_dims = inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getDims();
        SizeVector data_dims = inBlobs[UNSQUEEZE_DATA]->getTensorDesc().getDims();
        SizeVector outShape;
        if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1 dimension";

        // Dispatch on the element type of the indices blob.
        switch (inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getPrecision()) {
        case Precision::FP32: {
            procIndices<float>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        case Precision::FP16: {
            procIndices<ie_fp16>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        case Precision::I32: {
            procIndices<int32_t>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
        } break;
        default:
            THROW_IE_EXCEPTION << "Incorrect 'indices_to_set' input precision. Only FP32, FP16 and I32 are supported!";
        }
        outShapes.push_back(outShape);
    }

private:
    // Reads the axis list from the indices blob and builds the unsqueezed
    // shape in outShape. If the blob exposes no buffer, the data shape is
    // returned unchanged (best-effort fallback, preserved from the
    // original implementation).
    // NOTE(review): the merge loop below advances through idx_data in
    // order, so it assumes the axis list is sorted ascending — TODO confirm
    // with callers.
    template <typename T>
    void procIndices(const std::vector<Blob::CPtr>& inBlobs, const size_t UNSQUEEZE_INDEXES, SizeVector& data_dims,
                     SizeVector& outShape, const SizeVector& idx_dims) {
        // Check the raw buffer BEFORE applying the padding offset: the old
        // code added the offset to a possibly-null pointer first, which is
        // undefined behaviour and could defeat the null check entirely.
        T* idx_data = inBlobs[UNSQUEEZE_INDEXES]->cbuffer().as<T*>();
        if (!idx_data) {
            outShape = data_dims;
            return;
        }
        idx_data += inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();

        // Highest axis referenced (or the current rank, whichever is larger);
        // used only to validate that indices stay within the output rank.
        size_t max = data_dims.size();
        for (size_t i = 0; i < idx_dims[0]; i++) {
            auto axis = static_cast<size_t>(castToInt32(idx_data[i]));
            max = std::max(max, axis);
        }
        max++;
        if ((idx_dims[0] + data_dims.size()) < max) {
            THROW_IE_EXCEPTION << "Indices_to_set for unsqueeze layer is out of tensor dimension";
        }

        // Output rank = original rank + number of inserted axes. Walk the
        // output positions: positions listed in idx_data get a 1, every
        // other position takes the next original dimension.
        max = inBlobs[UNSQUEEZE_INDEXES]->size() + data_dims.size();
        for (size_t i = 0, j = 0, k = 0; i < max; i++) {
            size_t index_to_push = 1;
            if (k < inBlobs[UNSQUEEZE_INDEXES]->size() && i == castToInt32(idx_data[k])) {
                k++;
            } else {
                index_to_push = data_dims[j++];
            }
            outShape.push_back(index_to_push);
        }
    }

    // Overloads converting each supported index element type to int32.
    int32_t castToInt32(ie_fp16 x) {
        return static_cast<int32_t>(InferenceEngine::PrecisionUtils::f16tof32(x));
    }
    int32_t castToInt32(int32_t x) {
        return x;
    }
    int32_t castToInt32(float x) {
        return static_cast<int32_t>(x);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

View File

@@ -1,44 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <description_buffer.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_built_in_impl.hpp"
namespace InferenceEngine {
namespace ShapeInfer {
/**
 * @brief Shape inference implementation for the Upsampling layer.
 *
 * Batch and channel dimensions are preserved; every remaining (spatial)
 * dimension is multiplied by the integer "scale" layer parameter.
 */
class UpsamplingShapeProp : public BuiltInShapeInferImpl {
public:
    explicit UpsamplingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}

    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
        // Rehydrate a CNNLayer from the parameter map so validate() and
        // GetParamAsInt() can operate on it.
        LayerParams lp {};
        CNNLayer cnnLayer(lp);
        cnnLayer.params = params;
        cnnLayer.type = _type;
        validate(&cnnLayer, inBlobs, params, blobs);

        const auto scale = static_cast<size_t>(cnnLayer.GetParamAsInt("scale"));
        const SizeVector& src = inShapes[0];

        // Keep N and C as-is, scale each spatial dimension.
        SizeVector result = {src[0], src[1]};
        for (size_t dim = 2; dim < src.size(); ++dim) {
            result.push_back(src[dim] * scale);
        }
        outShapes.push_back(result);
    }
};
} // namespace ShapeInfer
} // namespace InferenceEngine

Some files were not shown because too many files have changed in this diff Show More