Compare commits
10 Commits
releases/2
...
releases/2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0629a0eb08 | ||
|
|
5e7aaee3fd | ||
|
|
bed54b7572 | ||
|
|
1db261981a | ||
|
|
1ab1c86855 | ||
|
|
606dfcb96b | ||
|
|
051a924024 | ||
|
|
8c3acd4f4e | ||
|
|
cf6c4e72b3 | ||
|
|
7cd76c1d29 |
@@ -44,7 +44,7 @@ ie_dependent_option (ENABLE_AVX2 "Enable AVX2 optimizations" ON "X86_64 OR X86"
|
||||
|
||||
ie_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR X86" OFF)
|
||||
|
||||
ie_dependent_option (ENABLE_PROFILING_ITT "ITT tracing of IE and plugins internals" ON "NOT CMAKE_CROSSCOMPILING" OFF)
|
||||
ie_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library though INTEL_VTUNE_DIR variable." OFF)
|
||||
|
||||
# Documentation build
|
||||
ie_option (ENABLE_DOCS "build docs using Doxygen" OFF)
|
||||
|
||||
@@ -116,7 +116,7 @@ For Intel® Distribution of OpenVINO™ toolkit, the Inference Engine package co
|
||||
[sample console applications](Samples_Overview.md) demonstrating how you can use
|
||||
the Inference Engine in your applications.
|
||||
|
||||
The open source version is available in the [OpenVINO™ toolkit GitHub repository](https://github.com/openvinotoolkit/openvino) and can be built for supported platforms using the <a href="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode">Inference Engine Build Instructions</a>.
|
||||
The open source version is available in the [OpenVINO™ toolkit GitHub repository](https://github.com/openvinotoolkit/openvino) and can be built for supported platforms using the <a href="https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md">Inference Engine Build Instructions</a>.
|
||||
## See Also
|
||||
- [Inference Engine Samples](Samples_Overview.md)
|
||||
- [Intel® Deep Learning Deployment Toolkit Web Page](https://software.intel.com/en-us/computer-vision-sdk)
|
||||
|
||||
@@ -53,7 +53,7 @@ The officially supported Linux* build environment is the following:
|
||||
* GCC* 7.5.0 (for Ubuntu* 18.04) or GCC* 4.8.5 (for CentOS* 7.6)
|
||||
* CMake* version 3.10 or higher
|
||||
|
||||
> **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode).
|
||||
> **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md).
|
||||
|
||||
To build the C or C++ sample applications for Linux, go to the `<INSTALL_DIR>/inference_engine/samples/c` or `<INSTALL_DIR>/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script:
|
||||
```sh
|
||||
|
||||
@@ -7,11 +7,11 @@ Inference Engine is a set of C++ libraries providing a common API to deliver inf
|
||||
|
||||
For Intel® Distribution of OpenVINO™ toolkit, Inference Engine binaries are delivered within release packages.
|
||||
|
||||
The open source version is available in the [OpenVINO™ toolkit GitHub repository](https://github.com/openvinotoolkit/openvino) and can be built for supported platforms using the <a href="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode">Inference Engine Build Instructions</a>.
|
||||
The open source version is available in the [OpenVINO™ toolkit GitHub repository](https://github.com/openvinotoolkit/openvino) and can be built for supported platforms using the <a href="https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md">Inference Engine Build Instructions</a>.
|
||||
|
||||
To learn about how to use the Inference Engine API for your application, see the [Integrating Inference Engine in Your Application](Integrate_with_customer_application_new_API.md) documentation.
|
||||
|
||||
For complete API Reference, see the [Inference Engine API References](./api_references.html) section.
|
||||
For complete API Reference, see the [API Reference](usergroup29.html) section.
|
||||
|
||||
Inference Engine uses a plugin architecture. Inference Engine plugin is a software component that contains complete implementation for inference on a certain Intel® hardware device: CPU, GPU, VPU, etc. Each plugin implements the unified API and provides additional hardware-specific APIs.
|
||||
|
||||
|
||||
@@ -65,7 +65,7 @@ CNNNetwork network = core.ReadNetwork(strModel, make_shared_blob<uint8_t>({Preci
|
||||
- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org)
|
||||
- Model Optimizer Developer Guide: [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
|
||||
- Inference Engine Developer Guide: [Inference Engine Developer Guide](Deep_Learning_Inference_Engine_DevGuide.md)
|
||||
- For more information on Sample Applications, see the [Inference Engine Samples Overview](Samples_Overview.md)
|
||||
- For more information on Sample Applications, see the [Inference Engine Samples Overview](Samples_Overview.html)
|
||||
- For information on a set of pre-trained models, see the [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index)
|
||||
- For information on Inference Engine Tutorials, see the [Inference Tutorials](https://github.com/intel-iot-devkit/inference-tutorials-generic)
|
||||
- For IoT Libraries and Code Samples see the [Intel® IoT Developer Kit](https://github.com/intel-iot-devkit).
|
||||
|
||||
@@ -2,98 +2,98 @@
|
||||
|
||||
## Introducing the GNA Plugin
|
||||
|
||||
Intel® Gaussian & Neural Accelerator is a low-power neural coprocessor for continuous inference at the edge.
|
||||
Intel® Gaussian & Neural Accelerator is a low-power neural coprocessor for continuous inference at the edge.
|
||||
|
||||
Intel® GNA is not intended to replace classic inference devices such as
|
||||
CPU, graphics processing unit (GPU), or vision processing unit (VPU). It is designed for offloading
|
||||
Intel® GNA is not intended to replace classic inference devices such as
|
||||
CPU, graphics processing unit (GPU), or vision processing unit (VPU) . It is designed for offloading
|
||||
continuous inference workloads including but not limited to noise reduction or speech recognition
|
||||
to save power and free CPU resources.
|
||||
|
||||
The GNA plugin provides a way to run inference on Intel® GNA, as well as in the software execution mode on CPU.
|
||||
The GNA plugin provides a way to run inference on Intel® GNA, as well as in the software execution mode on CPU.
|
||||
|
||||
## Devices with Intel® GNA
|
||||
## Devices with Intel® GNA
|
||||
|
||||
Devices with Intel® GNA support:
|
||||
Devices with Intel® GNA support:
|
||||
|
||||
* [Intel® Speech Enabling Developer Kit](https://www.intel.com/content/www/us/en/support/articles/000026156/boards-and-kits/smart-home.html)
|
||||
* [Intel® Speech Enabling Developer Kit](https://www.intel.com/content/www/us/en/support/articles/000026156/boards-and-kits/smart-home.html)
|
||||
|
||||
* [Amazon Alexa\* Premium Far-Field Developer Kit](https://developer.amazon.com/en-US/alexa/alexa-voice-service/dev-kits/amazon-premium-voice)
|
||||
* [Amazon Alexa* Premium Far-Field Developer Kit](https://developer.amazon.com/en-US/alexa/alexa-voice-service/dev-kits/amazon-premium-voice)
|
||||
|
||||
* [Intel® Pentium® Silver Processors N5xxx, J5xxx and Intel® Celeron® Processors N4xxx, J4xxx](https://ark.intel.com/content/www/us/en/ark/products/codename/83915/gemini-lake.html):
|
||||
- Intel® Pentium® Silver J5005 Processor
|
||||
- Intel® Pentium® Silver N5000 Processor
|
||||
- Intel® Celeron® J4005 Processor
|
||||
- Intel® Celeron® J4105 Processor
|
||||
- Intel® Celeron® Processor N4100
|
||||
- Intel® Celeron® Processor N4000
|
||||
* [Intel® Pentium® Silver Processors N5xxx, J5xxx and Intel® Celeron® Processors N4xxx, J4xxx](https://ark.intel.com/content/www/us/en/ark/products/codename/83915/gemini-lake.html):
|
||||
- Intel® Pentium® Silver J5005 Processor
|
||||
- Intel® Pentium® Silver N5000 Processor
|
||||
- Intel® Celeron® J4005 Processor
|
||||
- Intel® Celeron® J4105 Processor
|
||||
- Intel® Celeron® Processor N4100
|
||||
- Intel® Celeron® Processor N4000
|
||||
|
||||
* [Intel® Core™ Processors (formerly codenamed Cannon Lake)](https://ark.intel.com/content/www/us/en/ark/products/136863/intel-core-i3-8121u-processor-4m-cache-up-to-3-20-ghz.html):
|
||||
Intel® Core™ i3-8121U Processor
|
||||
* [Intel® Core™ Processors (formerly codenamed Cannon Lake)](https://ark.intel.com/content/www/us/en/ark/products/136863/intel-core-i3-8121u-processor-4m-cache-up-to-3-20-ghz.html):
|
||||
Intel® Core™ i3-8121U Processor
|
||||
|
||||
* [10th Generation Intel® Core™ Processors (formerly codenamed Ice Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/74979/ice-lake.html):
|
||||
- Intel® Core™ i7-1065G7 Processor
|
||||
- Intel® Core™ i7-1060G7 Processor
|
||||
- Intel® Core™ i5-1035G4 Processor
|
||||
- Intel® Core™ i5-1035G7 Processor
|
||||
- Intel® Core™ i5-1035G1 Processor
|
||||
- Intel® Core™ i5-1030G7 Processor
|
||||
- Intel® Core™ i5-1030G4 Processor
|
||||
- Intel® Core™ i3-1005G1 Processor
|
||||
- Intel® Core™ i3-1000G1 Processor
|
||||
- Intel® Core™ i3-1000G4 Processor
|
||||
* [10th Generation Intel® Core™ Processors (formerly codenamed Ice Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/74979/ice-lake.html):
|
||||
- Intel® Core™ i7-1065G7 Processor
|
||||
- Intel® Core™ i7-1060G7 Processor
|
||||
- Intel® Core™ i5-1035G4 Processor
|
||||
- Intel® Core™ i5-1035G7 Processor
|
||||
- Intel® Core™ i5-1035G1 Processor
|
||||
- Intel® Core™ i5-1030G7 Processor
|
||||
- Intel® Core™ i5-1030G4 Processor
|
||||
- Intel® Core™ i3-1005G1 Processor
|
||||
- Intel® Core™ i3-1000G1 Processor
|
||||
- Intel® Core™ i3-1000G4 Processor
|
||||
|
||||
* All [11th Generation Intel® Core™ Processors (formerly codenamed Tiger Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/88759/tiger-lake.html).
|
||||
* All [11th Generation Intel® Core™ Processors (formerly codenamed Tiger Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/88759/tiger-lake.html).
|
||||
|
||||
> **NOTE**: On platforms where Intel® GNA is not enabled in the BIOS, the driver cannot be installed, so the GNA plugin uses the software emulation mode only.
|
||||
> **NOTE**: On platforms where Intel® GNA is not enabled in the BIOS, the driver cannot be installed, so the GNA plugin uses the software emulation mode only.
|
||||
|
||||
## Drivers and Dependencies
|
||||
|
||||
Intel® GNA hardware requires a driver to be installed on the system.
|
||||
Intel® GNA hardware requires a driver to be installed on the system.
|
||||
|
||||
* Linux\* OS:
|
||||
[Download Intel® GNA driver for Ubuntu Linux 18.04.3 LTS (with HWE Kernel version 5.0+)](https://download.01.org/opencv/drivers/gna/)
|
||||
[Download Intel® GNA driver for Ubuntu Linux 18.04.3 LTS (with HWE Kernel version 5.0+)](https://download.01.org/opencv/drivers/gna/)
|
||||
|
||||
* Windows\* OS:
|
||||
Intel® GNA driver for Windows is available through Windows Update\*
|
||||
Intel® GNA driver for Windows is available through Windows Update\*
|
||||
|
||||
## Models and Layers Limitations
|
||||
|
||||
Because of specifics of hardware architecture, Intel® GNA supports a limited set of layers, their kinds and combinations.
|
||||
For example, you should not expect the GNA Plugin to be able to run computer vision models, except those specifically adapted
|
||||
for the GNA Plugin, because the plugin does not fully support 2D convolutions.
|
||||
|
||||
For the list of supported layers, see the **GNA** column of the **Supported Layers** section in [Supported Devices](Supported_Devices.md).
|
||||
Because of specifics of hardware architecture, Intel® GNA supports a limited set of layers, their kinds and combinations.
|
||||
For example, you should not expect the GNA Plugin to be able to run computer vision models, except those specifically adapted for the GNA Plugin, because the plugin does not fully support
|
||||
2D convolutions.
|
||||
|
||||
The list of supported layers can be found
|
||||
[here](Supported_Devices.md) (see the GNA column of Supported Layers section).
|
||||
Limitations include:
|
||||
|
||||
- Only 1D convolutions are natively supported in the models converted from:
|
||||
- [Kaldi](../../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md) framework
|
||||
- [TensorFlow](../../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md) framework. For TensorFlow models, use the `--disable_nhwc_to_nchw` option when running the Model Optimizer.
|
||||
- The number of output channels for convolutions must be a multiple of 4.
|
||||
- Permute layer support is limited to the cases where no data reordering is needed or when reordering is happening for two dimensions, at least one of which is not greater than 8.
|
||||
- [Kaldi](../../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md) framework;
|
||||
- [TensorFlow](../../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md) framework; note that for TensorFlow models, the option `--disable_nhwc_to_nchw` must be used when running the Model Optimizer.
|
||||
- The number of output channels for convolutions must be a multiple of 4
|
||||
- Permute layer support is limited to the cases where no data reordering is needed, or when reordering is happening for 2 dimensions, at least one of which is not greater than 8
|
||||
|
||||
#### Experimental Support for 2D Convolutions
|
||||
|
||||
The Intel® GNA hardware natively supports only 1D convolution.
|
||||
The Intel® GNA hardware natively supports only 1D convolution.
|
||||
|
||||
However, 2D convolutions can be mapped to 1D when a convolution kernel moves in a single direction. GNA Plugin performs such a transformation for Kaldi `nnet1` convolution. From this perspective, the Intel® GNA hardware convolution operation accepts an `NHWC` input and produces an `NHWC` output. Because OpenVINO™ only supports the `NCHW` layout, you may need to insert `Permute` layers before or after convolutions.
|
||||
However, 2D convolutions can be mapped to 1D when a convolution kernel moves in a single direction. Such a transformation is performed by the GNA Plugin for Kaldi `nnet1` convolution. From this perspective, the Intel® GNA hardware convolution operation accepts a `NHWC` input and produces `NHWC` output. Because OpenVINO™ only supports the `NCHW` layout, it may be necessary to insert `Permute` layers before or after convolutions.
|
||||
|
||||
For example, the Kaldi model optimizer inserts such a permute after convolution for the [rm_cnn4a network](https://download.01.org/openvinotoolkit/models_contrib/speech/kaldi/rm_cnn4a_smbr/). This `Permute` layer is automatically removed by the GNA Plugin, because the Intel® GNA hardware convolution layer already produces the required `NHWC` result.
|
||||
For example, the Kaldi model optimizer inserts such a permute after convolution for the [rm_cnn4a network](https://download.01.org/openvinotoolkit/models_contrib/speech/kaldi/rm_cnn4a_smbr/). This `Permute` layer is automatically removed by the GNA Plugin, because the Intel® GNA hardware convolution layer already produces the required `NHWC` result.
|
||||
|
||||
## Operation Precision
|
||||
|
||||
Intel® GNA essentially operates in the low-precision mode, which represents a mix of 8-bit (`I8`), 16-bit (`I16`), and 32-bit (`I32`) integer computations. Outputs calculated using a reduced integer precision are different from the scores calculated using the floating point format, for example, `FP32` outputs calculated on CPU using the Inference Engine [CPU Plugin](CPU.md).
|
||||
Intel® GNA essentially operates in the low-precision mode, which represents a mix of 8-bit (`I8`), 16-bit (`I16`), and 32-bit (`I32`) integer computations, so compared to 32-bit floating point (`FP32`) results – for example, calculated on CPU using Inference Engine [CPU Plugin](CPU.md) – outputs calculated using reduced integer precision are different from the scores calculated using floating point.
|
||||
|
||||
Unlike other plugins supporting low-precision execution, the GNA plugin calculates quantization factors at the model loading time, so you can run a model without calibration.
|
||||
Unlike other plugins supporting low-precision execution, the GNA plugin calculates quantization factors at the model loading time, so a model can run without calibration.
|
||||
|
||||
## <a name="execution-modes">Execution Modes</a>
|
||||
## <a name="execution-models">Execution Modes</a>
|
||||
|
||||
| Mode | Description |
|
||||
| :---------------------------------| :---------------------------------------------------------|
|
||||
| `GNA_AUTO` | Uses Intel® GNA if available, otherwise uses software execution mode on CPU. |
|
||||
| `GNA_HW` | Uses Intel® GNA if available, otherwise raises an error. |
|
||||
| `GNA_SW` | *Deprecated*. Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA, but not in the bit-exact mode. |
|
||||
| `GNA_SW_EXACT` | Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA in the bit-exact mode. |
|
||||
| `GNA_AUTO` | Uses Intel® GNA if available, otherwise uses software execution mode on CPU. |
|
||||
| `GNA_HW` | Uses Intel® GNA if available, otherwise raises an error. |
|
||||
| `GNA_SW` | *Deprecated*. Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA, but not in the bit-exact mode. |
|
||||
| `GNA_SW_EXACT` | Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA in the bit-exact mode. |
|
||||
| `GNA_SW_FP32` | Executes the GNA-compiled graph on CPU but substitutes parameters and calculations from low precision to floating point (`FP32`). |
|
||||
|
||||
## Supported Configuration Parameters
|
||||
@@ -101,42 +101,42 @@ Unlike other plugins supporting low-precision execution, the GNA plugin calculat
|
||||
The plugin supports the configuration parameters listed below.
|
||||
The parameters are passed as `std::map<std::string, std::string>` on `InferenceEngine::Core::LoadNetwork` or `InferenceEngine::SetConfig`.
|
||||
|
||||
You can change the `KEY_GNA_DEVICE_MODE` parameter at run time using `InferenceEngine::ExecutableNetwork::SetConfig`, which works for any value excluding `GNA_SW_FP32`. This enables you to switch the
|
||||
The parameter `KEY_GNA_DEVICE_MODE` can also be changed at run time using `InferenceEngine::ExecutableNetwork::SetConfig` (for any values excluding `GNA_SW_FP32`). This allows switching the
|
||||
execution between software emulation mode and hardware emulation mode after the model is loaded.
|
||||
|
||||
The parameter names below correspond to their usage through API keys, such as `GNAConfigParams::KEY_GNA_DEVICE_MODE` or `PluginConfigParams::KEY_PERF_COUNT`.
|
||||
When specifying key values as raw strings, that is, when using Python API, omit the `KEY_` prefix.
|
||||
When specifying key values as raw strings (that is, when using Python API), omit the `KEY_` prefix.
|
||||
|
||||
| Parameter Name | Parameter Values | Default Value | Description |
|
||||
| :---------------------------------| :---------------------------------------------------------| :-----------| :------------------------------------------------------------------------|
|
||||
| `KEY_GNA_COMPACT_MODE` | `YES`/`NO` | `YES` | Enables I/O buffers reuse to save space. Makes debugging harder. |
|
||||
| `KEY_GNA_SCALE_FACTOR` | `FP32` number | 1.0 | Sets the scale factor to use for input quantization. |
|
||||
| `KEY_GNA_DEVICE_MODE` | `GNA_AUTO`/`GNA_HW`/`GNA_SW_EXACT`/`GNA_SW_FP32` | `GNA_AUTO` | One of the modes described in <a href="#execution-modes">Execution Modes</a> |
|
||||
| `KEY_GNA_FIRMWARE_MODEL_IMAGE` | `std::string` | `""` | Sets the name for the embedded model binary dump file. |
|
||||
| `KEY_GNA_PRECISION` | `I16`/`I8` | `I16` | Sets the preferred integer weight resolution for quantization. |
|
||||
| `KEY_PERF_COUNT` | `YES`/`NO` | `NO` | Turns on performance counters reporting. |
|
||||
| `KEY_GNA_LIB_N_THREADS` | 1-127 integer number | 1 | Sets the number of GNA accelerator library worker threads used for inference computation in software modes.
|
||||
| `KEY_GNA_COMPACT_MODE` | `YES`/`NO` | `YES` | Reuse I/O buffers to save space (makes debugging harder) |
|
||||
| `KEY_GNA_SCALE_FACTOR` | `FP32` number | 1.0 | Scale factor to use for input quantization |
|
||||
| `KEY_GNA_DEVICE_MODE` | `GNA_AUTO`/`GNA_HW`/`GNA_SW_EXACT`/`GNA_SW_FP32` | `GNA_AUTO` | One of the modes described <a name="execution-models">Execution Models</a> |
|
||||
| `KEY_GNA_FIRMWARE_MODEL_IMAGE` | `std::string` | `""` | Name for embedded model binary dump file |
|
||||
| `KEY_GNA_PRECISION` | `I16`/`I8` | `I16` | Hint to GNA plugin: preferred integer weight resolution for quantization |
|
||||
| `KEY_PERF_COUNT` | `YES`/`NO` | `NO` | Turn on performance counters reporting |
|
||||
| `KEY_GNA_LIB_N_THREADS` | 1-127 integer number | 1 | Sets the number of GNA accelerator library worker threads used for inference computation in software modes
|
||||
|
||||
## How to Interpret Performance Counters
|
||||
|
||||
As a result of collecting performance counters using `InferenceEngine::InferRequest::GetPerformanceCounts`, you can find various performance data about execution on GNA.
|
||||
Returned map stores a counter description as a key, and a counter value in the `realTime_uSec` field of the `InferenceEngineProfileInfo` structure. Current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. The API enables you to retrieve counter units in cycles, you can convert cycles to seconds as follows:
|
||||
Returned map stores a counter description as a key, counter value is stored in the `realTime_uSec` field of the `InferenceEngineProfileInfo` structure. Current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. API allows to retrieve counter units in cycles, but they can be converted to seconds as follows:
|
||||
|
||||
```
|
||||
seconds = cycles / frequency
|
||||
```
|
||||
|
||||
Refer to the table below to learn about the frequency of Intel® GNA inside a particular processor.
|
||||
Processor | Frequency of Intel® GNA
|
||||
Refer to the table below to learn about the frequency of Intel® GNA inside a particular processor.
|
||||
Processor | Frequency of Intel® GNA
|
||||
---|---
|
||||
Intel® Ice Lake processors| 400MHz
|
||||
Intel® Core™ i3-8121U processor| 400MHz
|
||||
Intel® Gemini Lake processors | 200MHz
|
||||
Intel® Ice Lake processors| 400MHz
|
||||
Intel® Core™ i3-8121U processor| 400MHz
|
||||
Intel® Gemini Lake processors | 200MHz
|
||||
|
||||
Performance counters provided for the time being:
|
||||
|
||||
* Scoring request performance results
|
||||
* Number of total cycles spent on scoring in hardware including compute and memory stall cycles
|
||||
* Number of total cycles spent on scoring in hardware (including compute and memory stall cycles)
|
||||
* Number of stall cycles spent in hardware
|
||||
|
||||
## Multithreading Support in GNA Plugin
|
||||
@@ -151,40 +151,40 @@ The GNA plugin supports the following configuration parameters for multithreadin
|
||||
|
||||
## Network Batch Size
|
||||
|
||||
Intel® GNA plugin supports the processing of context-windowed speech frames in batches of 1-8 frames in one
|
||||
Intel® GNA plugin supports the processing of context-windowed speech frames in batches of 1-8 frames in one
|
||||
input blob using `InferenceEngine::ICNNNetwork::setBatchSize`. Increasing batch size only improves efficiency of `Fully Connected` layers.
|
||||
|
||||
> **NOTE**: For networks with `Convolutional`, `LSTM`, or `Memory` layers, the only supported batch size is 1.
|
||||
|
||||
## Compatibility with Heterogeneous Plugin
|
||||
|
||||
Heterogeneous plugin was tested with the Intel® GNA as a primary device and CPU as a secondary device. To run inference of networks with layers unsupported by the GNA plugin, such as Softmax, use the Heterogeneous plugin with the `HETERO:GNA,CPU` configuration.
|
||||
Heterogeneous plugin was tested with the Intel® GNA as a primary device and CPU as a secondary device. To run inference of networks with layers unsupported by the GNA plugin (for example, Softmax), use the Heterogeneous plugin with the `HETERO:GNA,CPU` configuration. For the list of supported networks, see the [Supported Frameworks](#supported-frameworks).
|
||||
|
||||
> **NOTE:** Due to limitation of the Intel® GNA backend library, heterogenous support is limited to cases where in the resulted sliced graph, only one subgraph is scheduled to run on GNA\_HW or GNA\_SW devices.
|
||||
> **NOTE:** Due to limitation of the Intel® GNA backend library, heterogenous support is limited to cases where in the resulted sliced graph, only one subgraph is scheduled to run on GNA\_HW or GNA\_SW devices.
|
||||
|
||||
## Recovery from Interruption by High-Priority Windows Audio Processes\*
|
||||
## Recovery from interruption by high-priority Windows audio processes\*
|
||||
|
||||
GNA is designed for real-time workloads such as noise reduction.
|
||||
As noted in the introduction, GNA is designed for real-time workloads such as noise reduction.
|
||||
For such workloads, processing should be time constrained, otherwise extra delays may cause undesired effects such as
|
||||
*audio glitches*. To make sure that processing can satisfy real-time requirements, the GNA driver provides a Quality of Service
|
||||
(QoS) mechanism, which interrupts requests that might cause high-priority Windows audio processes to miss
|
||||
the schedule, thereby causing long running GNA tasks to terminate early.
|
||||
audio "glitches". To make sure that processing can satisfy real time requirements, the GNA driver provides a QoS
|
||||
(Quality of Service) mechanism which interrupts requests that might cause high-priority Windows audio processes to miss
|
||||
schedule, thereby causing long running GNA tasks to terminate early.
|
||||
|
||||
Applications should be prepared for this situation.
|
||||
If an inference in the `GNA_HW` mode cannot be executed because of such an interruption, then `InferRequest::Wait()` returns status code
|
||||
`StatusCode::INFER_NOT_STARTED`. In future releases, it will be changed to a more meaningful status code.
|
||||
If an inference (in `GNA_HW` mode) cannot be executed because of such an interruption, then `InferRequest::Wait()` will return status code
|
||||
`StatusCode::INFER_NOT_STARTED` (note that it will be changed to a more meaningful status code in future releases).
|
||||
|
||||
Any application working with GNA must properly react to this code.
|
||||
One of the strategies to adapt an application:
|
||||
Any application working with GNA must properly react if it receives this code. Various strategies are possible.
|
||||
One of the options is to immediately switch to GNA SW emulation mode:
|
||||
|
||||
1. Immediately switch to the GNA_SW emulation mode:
|
||||
```cpp
|
||||
std::map<std::string, Parameter> newConfig;
|
||||
newConfig[GNAConfigParams::KEY_GNA_DEVICE_MODE] = Parameter("GNA_SW_EXACT");
|
||||
executableNet.SetConfig(newConfig);
|
||||
|
||||
```
|
||||
2. Resubmit and switch back to GNA_HW expecting that the competing application has finished.
|
||||
|
||||
then resubmit and switch back to GNA_HW after some time hoping that the competing application has finished.
|
||||
|
||||
## See Also
|
||||
|
||||
|
||||
@@ -2,15 +2,15 @@
|
||||
|
||||
## Introducing HDDL Plugin
|
||||
|
||||
The Inference Engine HDDL plugin is developed for inference of neural networks on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs. It is designed for use cases which require large throughputs of deep learning inference. It provides dozens of times the throughput as the MYRIAD Plugin does.
|
||||
The Inference Engine HDDL plugin is developed for inference of neural networks on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs which is designed for use cases those require large throughput of deep learning inference. It provides dozens amount of throughput as MYRIAD Plugin.
|
||||
|
||||
## Installation on Linux* OS
|
||||
|
||||
For installation instructions, refer to the [Installation Guide for Linux*](VPU.md).
|
||||
For installation instructions, refer to the [Installation Guide for Linux\*](VPU.md).
|
||||
|
||||
## Installation on Windows* OS
|
||||
|
||||
For installation instructions, refer to the [Installation Guide for Windows*](Supported_Devices.md).
|
||||
For installation instructions, refer to the [Installation Guide for Windows\*](Supported_Devices.md).
|
||||
|
||||
## Supported networks
|
||||
|
||||
@@ -30,7 +30,7 @@ In addition to common parameters for Myriad plugin and HDDL plugin, HDDL plugin
|
||||
| KEY_VPU_HDDL_STREAM_ID | string | empty string | Allows to execute inference on a specified device. |
|
||||
| KEY_VPU_HDDL_DEVICE_TAG | string | empty string | Allows to allocate/deallocate networks on specified devices. |
|
||||
| KEY_VPU_HDDL_BIND_DEVICE | YES/NO | NO | Whether the network should bind to a device. Refer to vpu_plugin_config.hpp. |
|
||||
| KEY_VPU_HDDL_RUNTIME_PRIORITY | singed int | 0 | Specify the runtime priority of a device among all devices that are running the same network. Refer to vpu_plugin_config.hpp. |
|
||||
| KEY_VPU_HDDL_RUNTIME_PRIORITY | singed int | 0 | Specify the runtime priority of a device among all devices that running a same network Refer to vpu_plugin_config.hpp. |
|
||||
|
||||
## See Also
|
||||
|
||||
|
||||
@@ -12,13 +12,6 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi
|
||||
|
||||
* <code>.bin</code> - Contains the weights and biases binary data.
|
||||
|
||||
> **TIP**: You also can work with the Model Optimizer inside the OpenVINO™ [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench).
|
||||
> [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare
|
||||
> performance of deep learning models on various Intel® architecture
|
||||
> configurations. In the DL Workbench, you can use most of OpenVINO™ toolkit components.
|
||||
> <br>
|
||||
> Proceed to an [easy installation from Docker](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) to get started.
|
||||
|
||||
## What's New in the Model Optimizer in this Release?
|
||||
|
||||
* Common changes:
|
||||
|
||||
@@ -242,8 +242,4 @@ To differentiate versions of the same operation type, like `ReLU`, the suffix `-
|
||||
`N` usually refers to the first `opsetN` where this version of the operation is introduced.
|
||||
It is not guaranteed that new operations will be named according to that rule, the naming convention might be changed, but not for old operations which are frozen completely.
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Cut Off Parts of a Model](prepare_model/convert_model/Cutting_Model.md)
|
||||
|
||||
|
||||
@@ -45,8 +45,3 @@ Possible workaround is to upgrade default protobuf compiler (libprotoc 2.5.0) to
|
||||
libprotoc 2.6.1.
|
||||
|
||||
[protobuf_issue]: https://github.com/google/protobuf/issues/4272
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Known Issues and Limitations in the Inference Engine](../IE_DG/Known_Issues_Limitations.md)
|
||||
|
||||
@@ -260,14 +260,6 @@ python3 -m easy_install dist/protobuf-3.6.1-py3.6-win-amd64.egg
|
||||
set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
|
||||
```
|
||||
|
||||
---
|
||||
## See Also
|
||||
docs\MO_DG\prepare_model\Config_Model_Optimizer.md
|
||||
docs\install_guides\installing-openvino-raspbian.md
|
||||
|
||||
* [Converting a Model to Intermediate Representation (IR)](convert_model/Converting_Model.md)
|
||||
* [Install OpenVINO™ toolkit for Raspbian* OS](../../install_guides/installing-openvino-raspbian.md)
|
||||
* [Install Intel® Distribution of OpenVINO™ toolkit for Windows* 10](../../install_guides/installing-openvino-windows.md)
|
||||
* [Install Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support](../../install_guides/installing-openvino-windows-fpga.md)
|
||||
* [Install Intel® Distribution of OpenVINO™ toolkit for macOS*](../../install_guides/installing-openvino-macos.md)
|
||||
* [Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit 2020.4 and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG2 (IEI's Mustang-F100-A10) on Linux* ](../../install_guides/VisionAcceleratorFPGA_Configure.md)
|
||||
|
||||
@@ -144,13 +144,3 @@ In this document, you learned:
|
||||
* Basic information about how the Model Optimizer works with Caffe\* models
|
||||
* Which Caffe\* models are supported
|
||||
* How to convert a trained Caffe\* model using the Model Optimizer with both framework-agnostic and Caffe-specific command-line options
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Converting a TensorFlow* Model](Convert_Model_From_TensorFlow.md)
|
||||
* [Converting an MXNet* Model](Convert_Model_From_MxNet.md)
|
||||
* [Converting a Kaldi* Model](Convert_Model_From_Kaldi.md)
|
||||
* [Converting an ONNX* Model](Convert_Model_From_ONNX.md)
|
||||
* [Converting a Model Using General Conversion Parameters](Converting_Model_General.md)
|
||||
* [Custom Layers in the Model Optimizer ](../customize_model_optimizer/Customize_Model_Optimizer.md)
|
||||
|
||||
@@ -106,12 +106,3 @@ must be copied to `Parameter_0_for_Offset_fastlstm2.r_trunc__2Offset_fastlstm2.r
|
||||
|
||||
## Supported Kaldi\* Layers
|
||||
Refer to [Supported Framework Layers ](../Supported_Frameworks_Layers.md) for the list of supported standard layers.
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Converting a TensorFlow* Model](Convert_Model_From_TensorFlow.md)
|
||||
* [Converting an MXNet* Model](Convert_Model_From_MxNet.md)
|
||||
* [Converting a Caffe* Model](Convert_Model_From_Caffe.md)
|
||||
* [Converting an ONNX* Model](Convert_Model_From_ONNX.md)
|
||||
* [Custom Layers Guide](../../../HOWTO/Custom_Layers_Guide.md)
|
||||
|
||||
@@ -103,12 +103,3 @@ In this document, you learned:
|
||||
* Basic information about how the Model Optimizer works with MXNet\* models
|
||||
* Which MXNet\* models are supported
|
||||
* How to convert a trained MXNet\* model using the Model Optimizer with both framework-agnostic and MXNet-specific command-line options
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Converting a TensorFlow* Model](Convert_Model_From_TensorFlow.md)
|
||||
* [Converting a Caffe* Model](Convert_Model_From_Caffe.md)
|
||||
* [Converting a Kaldi* Model](Convert_Model_From_Kaldi.md)
|
||||
* [Converting an ONNX* Model](Convert_Model_From_ONNX.md)
|
||||
* [Custom Layers in the Model Optimizer](../customize_model_optimizer/Customize_Model_Optimizer.md)
|
||||
|
||||
@@ -78,12 +78,3 @@ There are no ONNX\* specific parameters, so only [framework-agnostic parameters]
|
||||
|
||||
## Supported ONNX\* Layers
|
||||
Refer to [Supported Framework Layers](../Supported_Frameworks_Layers.md) for the list of supported standard layers.
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Converting a TensorFlow* Model](Convert_Model_From_TensorFlow.md)
|
||||
* [Converting an MXNet* Model](Convert_Model_From_MxNet.md)
|
||||
* [Converting a Caffe* Model](Convert_Model_From_Caffe.md)
|
||||
* [Converting a Kaldi* Model](Convert_Model_From_Kaldi.md)
|
||||
* [Convert TensorFlow* BERT Model to the Intermediate Representation ](tf_specific/Convert_BERT_From_Tensorflow.md)
|
||||
|
||||
@@ -375,12 +375,3 @@ In this document, you learned:
|
||||
* Which TensorFlow models are supported
|
||||
* How to freeze a TensorFlow model
|
||||
* How to convert a trained TensorFlow model using the Model Optimizer with both framework-agnostic and TensorFlow-specific command-line options
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Converting a Caffe* Model](Convert_Model_From_Caffe.md)
|
||||
* [Converting an MXNet* Model](Convert_Model_From_MxNet.md)
|
||||
* [Converting a Kaldi* Model](Convert_Model_From_Kaldi.md)
|
||||
* [Converting an ONNX* Model](Convert_Model_From_ONNX.md)
|
||||
* [Converting a Model Using General Conversion Parameters](Converting_Model_General.md)
|
||||
|
||||
@@ -233,13 +233,3 @@ Otherwise, it will be casted to data type passed to `--data_type` parameter (by
|
||||
```sh
|
||||
python3 mo.py --input_model FaceNet.pb --input "placeholder_layer_name->[0.1 1.2 2.3]"
|
||||
```
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Converting a Caffe* Model](Convert_Model_From_Caffe.md)
|
||||
* [Converting a TensorFlow* Model](Convert_Model_From_TensorFlow.md)
|
||||
* [Converting an MXNet* Model](Convert_Model_From_MxNet.md)
|
||||
* [Converting an ONNX* Model](Convert_Model_From_ONNX.md)
|
||||
* [Converting a Kaldi* Model](Convert_Model_From_Kaldi.md)
|
||||
* [Using Shape Inference](../../../IE_DG/ShapeInference.md)
|
||||
|
||||
@@ -389,11 +389,4 @@ In this case, when `--input_shape` is specified and the node contains multiple i
|
||||
The correct command line is:
|
||||
```sh
|
||||
python3 mo.py --input_model=inception_v1.pb --input=0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape=[1,224,224,3]
|
||||
```
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Sub-Graph Replacement in the Model Optimizer](../customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md)
|
||||
* [Extending the Model Optimizer with New Primitives](../customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md)
|
||||
* [Converting a Model Using General Conversion Parameters](Converting_Model_General.md)
|
||||
```
|
||||
@@ -34,11 +34,4 @@ Weights compression leaves `FakeQuantize` output arithmetically the same and wei
|
||||
See the visualization of `Convolution` with the compressed weights:
|
||||

|
||||
|
||||
Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default. To generate an expanded INT8 IR, use `--disable_weights_compression`.
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Quantization](@ref pot_compression_algorithms_quantization_README)
|
||||
* [Optimization Guide](../../../optimization_guide/dldt_optimization_guide.md)
|
||||
* [Low Precision Optimization Guide](@ref pot_docs_LowPrecisionOptimizationGuide)
|
||||
Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default. To generate an expanded INT8 IR, use `--disable_weights_compression`.
|
||||
@@ -110,8 +110,3 @@ speech_sample -i feats.ark,ivector_online_ie.ark -m final.xml -d CPU -o predicti
|
||||
|
||||
Results can be decoded as described in "Use of Sample in Kaldi* Speech Recognition Pipeline" chapter
|
||||
in [the Speech Recognition Sample description](../../../../../inference-engine/samples/speech_sample/README.md).
|
||||
|
||||
---
|
||||
## See Also
|
||||
|
||||
* [Converting a Kaldi Model](../Convert_Model_From_Kaldi.md)
|
||||
|
||||
@@ -19,7 +19,6 @@ Measuring inference performance involves many variables and is extremely use-cas
|
||||
<script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-datalabels"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/chartjs-plugin-annotation/0.5.7/chartjs-plugin-annotation.min.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-barchart-background@1.3.0/build/Plugin.Barchart.Background.min.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-deferred@1"></script>
|
||||
<!-- download this file and place on your server (or include the styles inline) -->
|
||||
<link rel="stylesheet" href="ovgraphs.css" type="text/css">
|
||||
\endhtmlonly
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
<!-- GET STARTED category -->
|
||||
<tab type="usergroup" title="GET STARTED" url="index.html">
|
||||
<!-- Install Directly -->
|
||||
<tab type="usergroup" title="Installation Guides" url=""><!--automatically generated-->
|
||||
<tab type="usergroup" title="Install Directly" url=""><!--automatically generated-->
|
||||
<tab type="usergroup" title="Linux" url="@ref openvino_docs_install_guides_installing_openvino_linux">
|
||||
<tab type="user" title="Install Intel® Distribution of OpenVINO™ toolkit for Linux* OS" url="@ref openvino_docs_install_guides_installing_openvino_linux"/>
|
||||
<tab type="user" title="[DEPRECATED] Install Intel® Distribution of OpenVINO™ toolkit for Linux with FPGA Support" url="@ref openvino_docs_install_guides_installing_openvino_linux_fpga"/>
|
||||
@@ -17,21 +17,19 @@
|
||||
</tab>
|
||||
<tab type="user" title="macOS" url="@ref openvino_docs_install_guides_installing_openvino_macos"/>
|
||||
<tab type="user" title="Raspbian OS" url="@ref openvino_docs_install_guides_installing_openvino_raspbian"/>
|
||||
<tab type="user" title="DL Workbench Installation Guide" url="./workbench_docs_Workbench_DG_Install_Workbench.html"/><!-- Link to the original Workbench topic -->
|
||||
</tab>
|
||||
<!-- Install From Images and Repositories -->
|
||||
<tab type="usergroup" title="Install From Images and Repositories" url="@ref openvino_docs_install_guides_installing_openvino_images">
|
||||
<tab type="usergroup" title="Install From Images and Repositories" url=""><!--automatically generated-->
|
||||
<tab type="usergroup" title="Docker" url="@ref openvino_docs_install_guides_installing_openvino_docker_linux">
|
||||
<tab type="user" title="Install Intel® Distribution of OpenVINO™ toolkit for Linux* from a Docker* Image" url="@ref openvino_docs_install_guides_installing_openvino_docker_linux"/>
|
||||
<tab type="user" title="Install Intel® Distribution of OpenVINO™ toolkit for Windows* from a Docker* Image" url="@ref openvino_docs_install_guides_installing_openvino_docker_windows"/>
|
||||
</tab>
|
||||
<tab type="user" title="Docker with DL Workbench" url="./workbench_docs_Workbench_DG_Install_from_Docker_Hub.html"/><!-- Link to the original Workbench topic -->
|
||||
<tab type="user" title="APT" url="@ref openvino_docs_install_guides_installing_openvino_apt"/>
|
||||
<tab type="user" title="YUM" url="@ref openvino_docs_install_guides_installing_openvino_yum"/>
|
||||
<tab type="user" title="Anaconda Cloud" url="@ref openvino_docs_install_guides_installing_openvino_conda"/>
|
||||
<tab type="user" title="Yocto" url="@ref openvino_docs_install_guides_installing_openvino_yocto"/>
|
||||
<tab type="user" title="PyPI" url="@ref openvino_docs_install_guides_installing_openvino_pip"/>
|
||||
<tab type="user" title="Build from Source" url="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode"/>
|
||||
<tab type="user" title="Build from Source" url="https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md"/>
|
||||
</tab>
|
||||
<!-- Get Started Guides-->
|
||||
<tab type="usergroup" title="Get Started Guides" url=""><!--automatically generated-->
|
||||
@@ -39,8 +37,7 @@
|
||||
<tab type="user" title="Linux" url="@ref openvino_docs_get_started_get_started_linux"/>
|
||||
<tab type="user" title="Windows" url="@ref openvino_docs_get_started_get_started_windows"/>
|
||||
<tab type="user" title="macOS" url="@ref openvino_docs_get_started_get_started_macos"/>
|
||||
<tab type="user" title="Get Started with OpenVINO via DL Workbench" url="@ref openvino_docs_get_started_get_started_dl_workbench"/>
|
||||
<tab type="user" title="Legal Information" url="@ref openvino_docs_Legal_Information"/>
|
||||
<tab type="user" title="Legal Information" url="@ref openvino_docs_Legal_Information"/>
|
||||
</tab>
|
||||
<!-- Configuration for Hardware -->
|
||||
<tab type="usergroup" title="Configuration for Hardware" url=""><!--automatically generated-->
|
||||
@@ -112,8 +109,8 @@
|
||||
<tab type="user" title="Automatic Speech Recognition C++ Sample" url="@ref openvino_inference_engine_samples_speech_sample_README"/>
|
||||
<tab type="user" title="Neural Style Transfer C++ Sample" url="@ref openvino_inference_engine_samples_style_transfer_sample_README"/>
|
||||
<tab type="user" title="Neural Style Transfer Python* Sample" url="@ref openvino_inference_engine_ie_bridges_python_sample_style_transfer_sample_README"/>
|
||||
<tab type="user" title="Benchmark C++ Tool" url="@ref openvino_inference_engine_samples_benchmark_app_README"/>
|
||||
<tab type="user" title="Benchmark Python* Tool" url="@ref openvino_inference_engine_tools_benchmark_tool_README"/>
|
||||
<tab type="user" title="Benchmark C++ App" url="@ref openvino_inference_engine_samples_benchmark_app_README"/>
|
||||
<tab type="user" title="Benchmark Python* App" url="@ref openvino_inference_engine_tools_benchmark_tool_README"/>
|
||||
</tab>
|
||||
|
||||
<!-- DL Streamer Examples -->
|
||||
@@ -131,9 +128,6 @@
|
||||
<tab type="user" title="Draw Face Attributes Python Sample" url="@ref gst_samples_python_draw_face_attributes_README"/>
|
||||
<tab type="user" title="Benchmark Sample" url="@ref gst_samples_benchmark_README"/>
|
||||
</tab>
|
||||
<tab type="usergroup" title="Add-Ons" url="">
|
||||
<tab type="user" title="Model Server" url="@ref openvino_docs_ovms"/>
|
||||
</tab>
|
||||
</tab>
|
||||
|
||||
<!-- Chinese docs -->
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:6675f4b68df7eaa3d6188ecc8b5d53be572cf9c92f53abac3bc6416e6b428d0c
|
||||
size 196146
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:539deb67a7d1c0e8b0c037f8e7488445be0895e8e717bed5cfec64131936870c
|
||||
size 198207
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:2925e58a71d684e23776e6ed55cc85d9085b3ba5e484720528aeac5fa59f9e3a
|
||||
size 55404
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:f4a52661c05977d878c614c4f8510935982ce8a0e120e05690307d7c95e4ab31
|
||||
size 73999
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:ddb0550f3f04c177ec116d6c41e6d3a2ac1fedea7121e10ad3836f84c86a5c78
|
||||
size 35278
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:f1e329304ff3d586bb2b8e2442333ede085593f40b1567bd5250508d33d3b9f9
|
||||
size 32668
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:605515f25a746579d3622b7a274c7dece95e4fbfc6c1817f99431c1abf116070
|
||||
size 55409
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:0ca48900ca8f6733c4a8ebc957517fbed80f3c080f53d251eeebb01f082c8f83
|
||||
size 55646
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:ba94c2c0e0cb98b9e43c876d060d8a7965182461b0d505167eb71134d4975b8f
|
||||
size 58204
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:75628b7d02f1fe5c25a233fa16ae1c6c3d5060bf3d15bc7b1e5b9ea71ce50b73
|
||||
size 50227
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:72ab36115cecfee4b215e1b21911ebac3706e513b72eea7bb829932f7bdb3a19
|
||||
size 70515
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:70aee6f0fd30c8e2139950c6bc831dc11b2616ea8f04b991efc9b3f5b7b11ce6
|
||||
size 88891
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:c1e297da7f7dfd2af7a0ba47ba1e5c14376f21b15dfcde1fe6f5ad3412ad8feb
|
||||
size 21296
|
||||
@@ -1,140 +0,0 @@
|
||||
# Get Started with OpenVINO™ Toolkit via Deep Learning Workbench {#openvino_docs_get_started_get_started_dl_workbench}
|
||||
|
||||
The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models on Intel® hardware. This guide helps you get started with the OpenVINO™ toolkit via the Deep Learning Workbench (DL Workbench) on Linux\*, Windows\*, or macOS\*.
|
||||
|
||||
In this guide, you will:
|
||||
* Learn the OpenVINO™ inference workflow.
|
||||
* Start DL Workbench on Linux. Links to instructions for other operating systems are provided as well.
|
||||
* Create a project and run a baseline inference.
|
||||
|
||||
[DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a web-based graphical environment that enables you to easily use various sophisticated
|
||||
OpenVINO™ toolkit components:
|
||||
* [Model Downloader](@ref omz_tools_downloader_README) to download models from the [Intel® Open Model Zoo](@ref omz_models_intel_index)
|
||||
with pretrained models for a range of different tasks
|
||||
* [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) to transform models into
|
||||
the Intermediate Representation (IR) format
|
||||
* [Post-Training Optimization toolkit](@ref pot_README) to calibrate a model and then execute it in the
|
||||
INT8 precision
|
||||
* [Accuracy Checker](@ref omz_tools_accuracy_checker_README) to determine the accuracy of a model
|
||||
* [Benchmark Tool](@ref openvino_inference_engine_samples_benchmark_app_README) to estimate inference performance on supported devices
|
||||
|
||||

|
||||
|
||||
DL Workbench supports the following scenarios:
|
||||
1. [Calibrate the model in INT8 precision](@ref workbench_docs_Workbench_DG_Int_8_Quantization)
|
||||
2. [Find the best combination](@ref workbench_docs_Workbench_DG_View_Inference_Results) of inference parameters: [number of streams and batches](../optimization_guide/dldt_optimization_guide.md)
|
||||
3. [Analyze inference results](@ref workbench_docs_Workbench_DG_Visualize_Model) and [compare them across different configurations](@ref workbench_docs_Workbench_DG_Compare_Performance_between_Two_Versions_of_Models)
|
||||
4. [Implement an optimal configuration into your application](@ref workbench_docs_Workbench_DG_Deploy_and_Integrate_Performance_Criteria_into_Application)
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Prerequisite | Linux* | Windows* | macOS*
|
||||
:----- | :----- |:----- |:-----
|
||||
Operating system|Ubuntu\* 18.04. Other Linux distributions, such as Ubuntu\* 16.04 and CentOS\* 7, are not validated.|Windows\* 10 | macOS\* 10.15 Catalina
|
||||
CPU | Intel® Core™ i5| Intel® Core™ i5 | Intel® Core™ i5
|
||||
GPU| Intel® Pentium® processor N4200/5 with Intel® HD Graphics | Not supported| Not supported
|
||||
HDDL, Myriad| Intel® Neural Compute Stick 2 <br> Intel® Vision Accelerator Design with Intel® Movidius™ VPUs| Not supported | Not supported
|
||||
Available RAM space| 4 GB| 4 GB| 4 GB
|
||||
Available storage space | 8 GB + space for imported artifacts| 8 GB + space for imported artifacts| 8 GB + space for imported artifacts
|
||||
Docker\*| Docker CE 18.06.1 | Docker Desktop 2.1.0.1|Docker CE 18.06.1
|
||||
Web browser| Google Chrome\* 76 <br> Browsers like Mozilla Firefox\* 71 or Apple Safari\* 12 are not validated. <br> Microsoft Internet Explorer\* is not supported.| Google Chrome\* 76 <br> Browsers like Mozilla Firefox\* 71 or Apple Safari\* 12 are not validated. <br> Microsoft Internet Explorer\* is not supported.| Google Chrome\* 76 <br>Browsers like Mozilla Firefox\* 71 or Apple Safari\* 12 are not validated. <br> Microsoft Internet Explorer\* is not supported.
|
||||
Resolution| 1440 x 890|1440 x 890|1440 x 890
|
||||
Internet|Optional|Optional|Optional
|
||||
Installation method| From Docker Hub <br> From OpenVINO™ toolkit package|From Docker Hub|From Docker Hub
|
||||
|
||||
## Start DL Workbench
|
||||
|
||||
This section provides instructions to run the DL Workbench on Linux from Docker Hub.
|
||||
|
||||
Use the command below to pull the latest Docker image with the application and run it:
|
||||
|
||||
```bash
|
||||
wget https://raw.githubusercontent.com/openvinotoolkit/workbench_aux/master/start_workbench.sh && bash start_workbench.sh
|
||||
```
|
||||
DL Workbench uses [authentication tokens](@ref workbench_docs_Workbench_DG_Authentication) to access the application. A token
|
||||
is generated automatically and displayed in the console output when you run the container for the first time. Once the command is executed, follow the link with the token. The **Get Started** page opens:
|
||||

|
||||
|
||||
For details and more installation options, visit the links below:
|
||||
* [Install DL Workbench from Docker Hub* on Linux* OS](@ref workbench_docs_Workbench_DG_Install_from_DockerHub_Linux)
|
||||
* [Install DL Workbench from Docker Hub on Windows*](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub_Win)
|
||||
* [Install DL Workbench from Docker Hub on macOS*](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub_mac)
|
||||
* [Install DL Workbench from the OpenVINO toolkit package on Linux](@ref workbench_docs_Workbench_DG_Install_from_Package)
|
||||
|
||||
## <a name="workflow-overview"></a>OpenVINO™ DL Workbench Workflow Overview
|
||||
|
||||
The simplified OpenVINO™ DL Workbench workflow is:
|
||||
1. **Get a trained model** for your inference task. Example inference tasks: pedestrian detection, face detection, vehicle detection, license plate recognition, head pose.
|
||||
2. **Run the trained model through the Model Optimizer** to convert the model to an Intermediate Representation, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
|
||||
3. **Run inference against the Intermediate Representation** (optimized model) and output inference results.
|
||||
|
||||
## Run Baseline Inference
|
||||
|
||||
This section illustrates a sample use case of how to infer a pretrained model from the [Intel® Open Model Zoo](@ref omz_models_intel_index) with an autogenerated noise dataset on a CPU device.
|
||||
|
||||
<iframe width="560" height="315" src="https://www.youtube.com/embed/9TRJwEmY0K4" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
|
||||
|
||||
Once you log in to the DL Workbench, create a project, which is a combination of a model, a dataset, and a target device. Follow the steps below:
|
||||
|
||||
### Step 1. Open a New Project
|
||||
|
||||
On the **Active Projects** page, click **Create** to open the **Create Project** page:
|
||||

|
||||
|
||||
### Step 2. Choose a Pretrained Model
|
||||
|
||||
Click **Import** next to the **Model** table on the **Create Project** page. The **Import Model** page opens. Select the squeezenet1.1 model from the Open Model Zoo and click **Import**.
|
||||

|
||||
|
||||
### Step 3. Convert the Model into Intermediate Representation
|
||||
|
||||
The **Convert Model to IR** tab opens. Keep the FP16 precision and click **Convert**.
|
||||

|
||||
|
||||
You are directed back to the **Create Project** page where you can see the status of the chosen model.
|
||||

|
||||
|
||||
### Step 4. Generate a Noise Dataset
|
||||
|
||||
Scroll down to the **Validation Dataset** table. Click **Generate** next to the table heading.
|
||||

|
||||
|
||||
The **Autogenerate Dataset** page opens. Click **Generate**.
|
||||

|
||||
|
||||
You are directed back to the **Create Project** page where you can see the status of the dataset.
|
||||

|
||||
|
||||
### Step 5. Create the Project and Run a Baseline Inference
|
||||
|
||||
On the **Create Project** page, select the imported model, CPU target, and the generated dataset. Click **Create**.
|
||||

|
||||
|
||||
The inference starts and you cannot proceed until it is done.
|
||||

|
||||
|
||||
Once the inference is complete, the **Projects** page opens automatically. Find your inference job in the **Projects Settings** table indicating all jobs.
|
||||

|
||||
|
||||
Congratulations, you have performed your first inference in the OpenVINO DL Workbench. Now you can proceed to:
|
||||
* [Select the inference](@ref workbench_docs_Workbench_DG_Run_Single_Inference)
|
||||
* [Visualize statistics](@ref workbench_docs_Workbench_DG_Visualize_Model)
|
||||
* [Experiment with model optimization](@ref workbench_docs_Workbench_DG_Int_8_Quantization)
|
||||
and inference options to profile the configuration
|
||||
|
||||
For detailed instructions to create a new project, visit the links below:
|
||||
* [Select a model](@ref workbench_docs_Workbench_DG_Select_Model)
|
||||
* [Select a dataset](@ref workbench_docs_Workbench_DG_Select_Datasets)
|
||||
* [Select a target and an environment](@ref workbench_docs_Workbench_DG_Select_Environment). This can be your local workstation or a remote target. If you use a remote target, [register the remote machine](@ref workbench_docs_Workbench_DG_Add_Remote_Target) first.
|
||||
|
||||
## Additional Resources
|
||||
|
||||
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
|
||||
* [OpenVINO™ Toolkit Overview](../index.md)
|
||||
* [DL Workbench Installation Guide](@ref workbench_docs_Workbench_DG_Install_Workbench)
|
||||
* [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
|
||||
* [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
|
||||
* [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
|
||||
* [Overview of OpenVINO™ Toolkit Pre-Trained Models](https://software.intel.com/en-us/openvino-toolkit/documentation/pretrained-models)
|
||||
* [OpenVINO™ Hello World Face Detection Exercise](https://github.com/intel-iot-devkit/inference-tutorials-generic)
|
||||
@@ -195,7 +195,7 @@ You will perform the following steps:
|
||||
|
||||
Each demo and code sample is a separate application, but they use the same behavior and components. The code samples and demo applications are:
|
||||
|
||||
* [Code Samples](../IE_DG/Samples_Overview.md) - Small console applications that show how to utilize specific OpenVINO capabilities within an application and execute specific tasks such as loading a model, running inference, querying specific device capabilities, and more.
|
||||
* [Code Samples](../IE_DG/Samples_Overview.html) - Small console applications that show how to utilize specific OpenVINO capabilities within an application and execute specific tasks such as loading a model, running inference, querying specific device capabilities, and more.
|
||||
|
||||
* [Demo Applications](@ref omz_demos_README) - Console applications that provide robust application templates to support developers in implementing specific deep learning scenarios. They may also involve more complex processing pipelines that gather analysis from several models that run inference simultaneously. For example concurrently detecting a person in a video stream and detecting attributes such as age, gender and/or emotions.
|
||||
|
||||
@@ -370,7 +370,7 @@ As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two
|
||||
|
||||
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
|
||||
|
||||
> **NOTE**: The Image Classification code sample is automatically compiled when you ran the Image Classification demo script. If you want to compile it manually, see the *Build the Sample Applications on Linux* section in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md).
|
||||
> **NOTE**: The Image Classification code sample is automatically compiled when you ran the Image Classification demo script. If you want to compile it manually, see the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.html#build_samples_linux) section.
|
||||
|
||||
To run the **Image Classification** code sample with an input image on the IR:
|
||||
|
||||
|
||||
@@ -99,4 +99,4 @@ Intel® Distribution of OpenVINO™ toolkit includes the following components:
|
||||
- [OpenCV](https://docs.opencv.org/master/) - OpenCV* community version compiled for Intel® hardware
|
||||
- [Intel® Media SDK](https://software.intel.com/en-us/media-sdk) (in Intel® Distribution of OpenVINO™ toolkit for Linux only)
|
||||
|
||||
OpenVINO™ Toolkit opensource version is available on [GitHub](https://github.com/openvinotoolkit/openvino). For building the Inference Engine from the source code, see the <a href="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode">build instructions</a>.
|
||||
OpenVINO™ Toolkit opensource version is available on [GitHub](https://github.com/openvinotoolkit/openvino). For building the Inference Engine from the source code, see the <a href="https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md">build instructions</a>.
|
||||
@@ -9,17 +9,11 @@ This guide provides the steps for creating a Docker* image with Intel® Distribu
|
||||
**Target Operating Systems**
|
||||
|
||||
- Ubuntu\* 18.04 long-term support (LTS), 64-bit
|
||||
- Ubuntu\* 20.04 long-term support (LTS), 64-bit
|
||||
- CentOS\* 7.6
|
||||
|
||||
**Host Operating Systems**
|
||||
|
||||
- Linux with installed GPU driver and with Linux kernel supported by GPU driver
|
||||
|
||||
## Prebuilt images
|
||||
|
||||
Prebuilt images are available on [Docker Hub](https://hub.docker.com/u/openvino).
|
||||
|
||||
## Use Docker* Image for CPU
|
||||
|
||||
- Kernel reports the same information for all containers as for native application, for example, CPU, memory information.
|
||||
@@ -28,14 +22,127 @@ Prebuilt images are available on [Docker Hub](https://hub.docker.com/u/openvino)
|
||||
|
||||
### <a name="building-for-cpu"></a>Build a Docker* Image for CPU
|
||||
|
||||
You can use [available Dockerfiles](https://github.com/openvinotoolkit/docker_ci/tree/master/dockerfiles) or generate a Dockerfile with your setting via [DockerHub CI Framework](https://github.com/openvinotoolkit/docker_ci) for Intel® Distribution of OpenVINO™ toolkit.
|
||||
The Framework can generate a Dockerfile, build, test, and deploy an image with the Intel® Distribution of OpenVINO™ toolkit.
|
||||
To build a Docker image, create a `Dockerfile` that contains defined variables and commands required to create an OpenVINO toolkit installation image.
|
||||
|
||||
Create your `Dockerfile` using the following example as a template:
|
||||
|
||||
<details>
|
||||
<summary>Click to expand/collapse</summary>
|
||||
|
||||
```sh
|
||||
FROM ubuntu:18.04
|
||||
|
||||
USER root
|
||||
WORKDIR /
|
||||
|
||||
SHELL ["/bin/bash", "-xo", "pipefail", "-c"]
|
||||
|
||||
# Creating user openvino
|
||||
RUN useradd -ms /bin/bash openvino && \
|
||||
chown openvino -R /home/openvino
|
||||
|
||||
ARG DEPENDENCIES="autoconf \
|
||||
automake \
|
||||
build-essential \
|
||||
cmake \
|
||||
cpio \
|
||||
curl \
|
||||
gnupg2 \
|
||||
libdrm2 \
|
||||
libglib2.0-0 \
|
||||
lsb-release \
|
||||
libgtk-3-0 \
|
||||
libtool \
|
||||
udev \
|
||||
unzip \
|
||||
dos2unix"
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends ${DEPENDENCIES} && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /thirdparty
|
||||
RUN sed -Ei 's/# deb-src /deb-src /' /etc/apt/sources.list && \
|
||||
apt-get update && \
|
||||
apt-get source ${DEPENDENCIES} && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# setup Python
|
||||
ENV PYTHON python3.6
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends python3-pip python3-dev lib${PYTHON}=3.6.9-1~18.04 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG package_url=http://registrationcenter-download.intel.com/akdlm/irc_nas/16612/l_openvino_toolkit_p_0000.0.000.tgz
|
||||
ARG TEMP_DIR=/tmp/openvino_installer
|
||||
|
||||
WORKDIR ${TEMP_DIR}
|
||||
ADD ${package_url} ${TEMP_DIR}
|
||||
|
||||
# install product by installation script
|
||||
ENV INTEL_OPENVINO_DIR /opt/intel/openvino
|
||||
|
||||
RUN tar -xzf ${TEMP_DIR}/*.tgz --strip 1
|
||||
RUN sed -i 's/decline/accept/g' silent.cfg && \
|
||||
${TEMP_DIR}/install.sh -s silent.cfg && \
|
||||
${INTEL_OPENVINO_DIR}/install_dependencies/install_openvino_dependencies.sh
|
||||
|
||||
WORKDIR /tmp
|
||||
RUN rm -rf ${TEMP_DIR}
|
||||
|
||||
# installing dependencies for package
|
||||
WORKDIR /tmp
|
||||
|
||||
RUN ${PYTHON} -m pip install --no-cache-dir setuptools && \
|
||||
find "${INTEL_OPENVINO_DIR}/" -type f -name "*requirements*.*" -path "*/${PYTHON}/*" -exec ${PYTHON} -m pip install --no-cache-dir -r "{}" \; && \
|
||||
find "${INTEL_OPENVINO_DIR}/" -type f -name "*requirements*.*" -not -path "*/post_training_optimization_toolkit/*" -not -name "*windows.txt" -not -name "*ubuntu16.txt" -not -path "*/python3*/*" -not -path "*/python2*/*" -exec ${PYTHON} -m pip install --no-cache-dir -r "{}" \;
|
||||
|
||||
WORKDIR ${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/accuracy_checker
|
||||
RUN source ${INTEL_OPENVINO_DIR}/bin/setupvars.sh && \
|
||||
${PYTHON} -m pip install --no-cache-dir -r ${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/accuracy_checker/requirements.in && \
|
||||
${PYTHON} ${INTEL_OPENVINO_DIR}/deployment_tools/open_model_zoo/tools/accuracy_checker/setup.py install
|
||||
|
||||
WORKDIR ${INTEL_OPENVINO_DIR}/deployment_tools/tools/post_training_optimization_toolkit
|
||||
RUN if [ -f requirements.txt ]; then \
|
||||
${PYTHON} -m pip install --no-cache-dir -r ${INTEL_OPENVINO_DIR}/deployment_tools/tools/post_training_optimization_toolkit/requirements.txt && \
|
||||
${PYTHON} ${INTEL_OPENVINO_DIR}/deployment_tools/tools/post_training_optimization_toolkit/setup.py install; \
|
||||
fi;
|
||||
|
||||
# Post-installation cleanup and setting up OpenVINO environment variables
|
||||
RUN if [ -f "${INTEL_OPENVINO_DIR}"/bin/setupvars.sh ]; then \
|
||||
printf "\nsource \${INTEL_OPENVINO_DIR}/bin/setupvars.sh\n" >> /home/openvino/.bashrc; \
|
||||
printf "\nsource \${INTEL_OPENVINO_DIR}/bin/setupvars.sh\n" >> /root/.bashrc; \
|
||||
fi;
|
||||
RUN find "${INTEL_OPENVINO_DIR}/" -name "*.*sh" -type f -exec dos2unix {} \;
|
||||
|
||||
USER openvino
|
||||
WORKDIR ${INTEL_OPENVINO_DIR}
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
> **NOTE**: Please replace the direct link to the Intel® Distribution of OpenVINO™ toolkit package with the latest version in the `package_url` argument. You can copy the link from the [Intel® Distribution of OpenVINO™ toolkit download page](https://software.seek.intel.com/openvino-toolkit) after registration. Right click on the **Offline Installer** button on the download page for Linux in your browser and press **Copy link address**.
|
||||
|
||||
You can select which OpenVINO components will be installed by modifying `COMPONENTS` parameter in the `silent.cfg` file. For example to install only CPU runtime for the Inference Engine, set
|
||||
`COMPONENTS=intel-openvino-ie-rt-cpu__x86_64` in `silent.cfg`.
|
||||
|
||||
To get a full list of available components for installation, run the `./install.sh --list_components` command from the unpacked OpenVINO™ toolkit package.
|
||||
|
||||
To build a Docker* image for CPU, run the following command:
|
||||
```sh
|
||||
docker build . -t <image_name> \
|
||||
--build-arg HTTP_PROXY=<http://your_proxy_server.com:port> \
|
||||
--build-arg HTTPS_PROXY=<https://your_proxy_server.com:port>
|
||||
```
|
||||
|
||||
### Run the Docker* Image for CPU
|
||||
|
||||
Run the image with the following command:
|
||||
```sh
|
||||
docker run -it --rm <image_name>
|
||||
docker run -it <image_name>
|
||||
```
|
||||
## Use a Docker* Image for GPU
|
||||
### Build a Docker* Image for GPU
|
||||
@@ -46,9 +153,8 @@ docker run -it --rm <image_name>
|
||||
- Intel® OpenCL™ runtime package must be included into the container.
|
||||
- In the container, user must be in the `video` group.
|
||||
|
||||
Before building a Docker* image on GPU, add the following commands to a Dockerfile:
|
||||
Before building a Docker* image on GPU, add the following commands to the `Dockerfile` example for CPU above:
|
||||
|
||||
**Ubuntu 18.04/20.04**:
|
||||
```sh
|
||||
WORKDIR /tmp/opencl
|
||||
RUN usermod -aG video openvino
|
||||
@@ -64,36 +170,28 @@ RUN apt-get update && \
|
||||
ldconfig && \
|
||||
rm /tmp/opencl
|
||||
```
|
||||
**CentOS 7.6**:
|
||||
```sh
|
||||
WORKDIR /tmp/opencl
|
||||
RUN groupmod -g 44 video
|
||||
|
||||
RUN yum update -y && yum install -y epel-release && \
|
||||
yum update -y && yum install -y ocl-icd ocl-icd-devel && \
|
||||
yum clean all && rm -rf /var/cache/yum && \
|
||||
curl -L https://sourceforge.net/projects/intel-compute-runtime/files/19.41.14441/centos-7/intel-gmmlib-19.3.2-1.el7.x86_64.rpm/download -o intel-gmmlib-19.3.2-1.el7.x86_64.rpm && \
|
||||
curl -L https://sourceforge.net/projects/intel-compute-runtime/files/19.41.14441/centos-7/intel-gmmlib-devel-19.3.2-1.el7.x86_64.rpm/download -o intel-gmmlib-devel-19.3.2-1.el7.x86_64.rpm && \
|
||||
curl -L https://sourceforge.net/projects/intel-compute-runtime/files/19.41.14441/centos-7/intel-igc-core-1.0.2597-1.el7.x86_64.rpm/download -o intel-igc-core-1.0.2597-1.el7.x86_64.rpm && \
|
||||
curl -L https://sourceforge.net/projects/intel-compute-runtime/files/19.41.14441/centos-7/intel-igc-opencl-1.0.2597-1.el7.x86_64.rpm/download -o intel-igc-opencl-1.0.2597-1.el7.x86_64.rpm && \
|
||||
curl -L https://sourceforge.net/projects/intel-compute-runtime/files/19.41.14441/centos-7/intel-igc-opencl-devel-1.0.2597-1.el7.x86_64.rpm/download -o intel-igc-opencl-devel-1.0.2597-1.el7.x86_64.rpm && \
|
||||
curl -L https://sourceforge.net/projects/intel-compute-runtime/files/19.41.14441/centos-7/intel-opencl-19.41.14441-1.el7.x86_64.rpm/download -o intel-opencl-19.41.14441-1.el7.x86_64.rpm \
|
||||
rpm -ivh ${TEMP_DIR}/*.rpm && \
|
||||
ldconfig && \
|
||||
rm -rf ${TEMP_DIR} && \
|
||||
yum remove -y epel-release
|
||||
To build a Docker* image for GPU, run the following command:
|
||||
```sh
|
||||
docker build . -t <image_name> \
|
||||
--build-arg HTTP_PROXY=<http://your_proxy_server.com:port> \
|
||||
--build-arg HTTPS_PROXY=<https://your_proxy_server.com:port>
|
||||
```
|
||||
|
||||
### Run the Docker* Image for GPU
|
||||
|
||||
To make GPU available in the container, attach the GPU to the container using `--device /dev/dri` option and run the container:
|
||||
```sh
|
||||
docker run -it --rm --device /dev/dri <image_name>
|
||||
docker run -it --device /dev/dri <image_name>
|
||||
```
|
||||
|
||||
## Use a Docker* Image for Intel® Neural Compute Stick 2
|
||||
|
||||
### Build and Run the Docker* Image for Intel® Neural Compute Stick 2
|
||||
### Build a Docker* Image for Intel® Neural Compute Stick 2
|
||||
|
||||
Build a Docker image using the same steps as for CPU.
|
||||
|
||||
### Run the Docker* Image for Intel® Neural Compute Stick 2
|
||||
|
||||
**Known limitations:**
|
||||
|
||||
@@ -101,24 +199,12 @@ docker run -it --rm --device /dev/dri <image_name>
|
||||
- UDEV events are not forwarded to the container by default, so it does not know about device reconnection.
|
||||
- Only one device per host is supported.
|
||||
|
||||
Use one of the following options as **Possible solutions for Intel® Neural Compute Stick 2:**
|
||||
Use one of the following options to run **Possible solutions for Intel® Neural Compute Stick 2:**
|
||||
|
||||
#### Option #1
|
||||
1. Get rid of UDEV by rebuilding `libusb` without UDEV support in the Docker* image (add the following commands to a `Dockerfile`):
|
||||
- **Ubuntu 18.04/20.04**:
|
||||
- **Solution #1**:
|
||||
1. Get rid of UDEV by rebuilding `libusb` without UDEV support in the Docker* image (add the following commands to the `Dockerfile` example for CPU above):<br>
|
||||
```sh
|
||||
ARG BUILD_DEPENDENCIES="autoconf \
|
||||
automake \
|
||||
build-essential \
|
||||
libtool \
|
||||
unzip \
|
||||
udev"
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends ${BUILD_DEPENDENCIES} && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN usermod -aG users openvino
|
||||
|
||||
WORKDIR /opt
|
||||
RUN curl -L https://github.com/libusb/libusb/archive/v1.0.22.zip --output v1.0.22.zip && \
|
||||
unzip v1.0.22.zip
|
||||
@@ -127,6 +213,9 @@ WORKDIR /opt/libusb-1.0.22
|
||||
RUN ./bootstrap.sh && \
|
||||
./configure --disable-udev --enable-shared && \
|
||||
make -j4
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends libusb-1.0-0-dev=2:1.0.21-2 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /opt/libusb-1.0.22/libusb
|
||||
RUN /bin/mkdir -p '/usr/local/lib' && \
|
||||
@@ -137,103 +226,38 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
|
||||
|
||||
WORKDIR /opt/libusb-1.0.22/
|
||||
RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
||||
cp /opt/intel/openvino/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
|
||||
ldconfig
|
||||
```
|
||||
- **CentOS 7.6**:
|
||||
<br>
|
||||
2. Run the Docker* image:<br>
|
||||
```sh
|
||||
ARG BUILD_DEPENDENCIES="autoconf \
|
||||
automake \
|
||||
libtool \
|
||||
unzip \
|
||||
udev"
|
||||
|
||||
# hadolint ignore=DL3031, DL3033
|
||||
RUN yum update -y && yum install -y ${BUILD_DEPENDENCIES} && \
|
||||
yum group install -y "Development Tools" && \
|
||||
yum clean all && rm -rf /var/cache/yum
|
||||
|
||||
WORKDIR /opt
|
||||
RUN curl -L https://github.com/libusb/libusb/archive/v1.0.22.zip --output v1.0.22.zip && \
|
||||
unzip v1.0.22.zip && rm -rf v1.0.22.zip
|
||||
|
||||
WORKDIR /opt/libusb-1.0.22
|
||||
RUN ./bootstrap.sh && \
|
||||
./configure --disable-udev --enable-shared && \
|
||||
make -j4
|
||||
|
||||
WORKDIR /opt/libusb-1.0.22/libusb
|
||||
RUN /bin/mkdir -p '/usr/local/lib' && \
|
||||
/bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \
|
||||
/bin/mkdir -p '/usr/local/include/libusb-1.0' && \
|
||||
/usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
|
||||
/bin/mkdir -p '/usr/local/lib/pkgconfig' && \
|
||||
printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino/bin/setupvars.sh
|
||||
|
||||
WORKDIR /opt/libusb-1.0.22/
|
||||
RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
||||
cp /opt/intel/openvino/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
|
||||
ldconfig
|
||||
```
|
||||
2. Run the Docker* image:
|
||||
```sh
|
||||
docker run -it --rm --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
||||
docker run --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
||||
```
|
||||
|
||||
#### Option #2
|
||||
Run container in the privileged mode, enable the Docker network configuration as host, and mount all devices to the container:
|
||||
- **Solution #2**:
|
||||
Run container in privileged mode, enable Docker network configuration as host, and mount all devices to container:<br>
|
||||
```sh
|
||||
docker run -it --rm --privileged -v /dev:/dev --network=host <image_name>
|
||||
docker run --privileged -v /dev:/dev --network=host <image_name>
|
||||
```
|
||||
> **NOTES**:
|
||||
> - It is not secure.
|
||||
> - Conflicts with Kubernetes* and other tools that use orchestration and private networks may occur.
|
||||
|
||||
> **Notes**:
|
||||
> - It is not secure
|
||||
> - Conflicts with Kubernetes* and other tools that use orchestration and private networks
|
||||
|
||||
## Use a Docker* Image for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
|
||||
|
||||
### Build Docker* Image for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
|
||||
To use the Docker container for inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs:
|
||||
|
||||
1. Set up the environment on the host machine, that is going to be used for running Docker*.
|
||||
It is required to execute `hddldaemon`, which is responsible for communication between the HDDL plugin and the board.
|
||||
To learn how to set up the environment (the OpenVINO package or HDDL package must be pre-installed), see [Configuration guide for HDDL device](https://github.com/openvinotoolkit/docker_ci/blob/master/install_guide_vpu_hddl.md) or [Configuration Guide for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs](installing-openvino-linux-ivad-vpu.md).
|
||||
2. Prepare the Docker* image (add the following commands to a Dockerfile).
|
||||
- **Ubuntu 18.04**:
|
||||
1. Set up the environment on the host machine, that is going to be used for running Docker*. It is required to execute `hddldaemon`, which is responsible for communication between the HDDL plugin and the board. To learn how to set up the environment (the OpenVINO package must be pre-installed), see [Configuration Guide for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs](installing-openvino-linux-ivad-vpu.md).
|
||||
2. Prepare the Docker* image. As a base image, you can use the image from the section [Building Docker Image for CPU](#building-for-cpu). To use it for inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs you need to rebuild the image with adding the following dependencies:
|
||||
```sh
|
||||
WORKDIR /tmp
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
libboost-filesystem1.65-dev \
|
||||
libboost-thread1.65-dev \
|
||||
libjson-c3 libxxf86vm-dev && \
|
||||
rm -rf /var/lib/apt/lists/* && rm -rf /tmp/*
|
||||
```
|
||||
- **Ubuntu 20.04**:
|
||||
```sh
|
||||
WORKDIR /tmp
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
libboost-filesystem-dev \
|
||||
libboost-thread-dev \
|
||||
libjson-c4 \
|
||||
libxxf86vm-dev && \
|
||||
rm -rf /var/lib/apt/lists/* && rm -rf /tmp/*
|
||||
```
|
||||
- **CentOS 7.6**:
|
||||
```sh
|
||||
WORKDIR /tmp
|
||||
RUN yum update -y && yum install -y \
|
||||
boost-filesystem \
|
||||
boost-thread \
|
||||
boost-program-options \
|
||||
boost-system \
|
||||
boost-chrono \
|
||||
boost-date-time \
|
||||
boost-regex \
|
||||
boost-atomic \
|
||||
json-c \
|
||||
libXxf86vm-devel && \
|
||||
yum clean all && rm -rf /var/cache/yum
|
||||
libboost-filesystem1.65-dev=1.65.1+dfsg-0ubuntu5 \
|
||||
libboost-thread1.65-dev=1.65.1+dfsg-0ubuntu5 \
|
||||
libjson-c3=0.12.1-1.3 libxxf86vm-dev=1:1.1.4-1 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
```
|
||||
3. Run `hddldaemon` on the host in a separate terminal session using the following command:
|
||||
```sh
|
||||
@@ -243,50 +267,22 @@ $HDDL_INSTALL_DIR/hddldaemon
|
||||
### Run the Docker* Image for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
|
||||
To run the built Docker* image for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, use the following command:
|
||||
```sh
|
||||
docker run -it --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp <image_name>
|
||||
docker run --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp -ti <image_name>
|
||||
```
|
||||
|
||||
> **NOTES**:
|
||||
> **NOTE**:
|
||||
> - The device `/dev/ion` needs to be shared to be able to use ion buffers among the plugin, `hddldaemon` and the kernel.
|
||||
> - Since separate inference tasks share the same HDDL service communication interface (the service creates mutexes and a socket file in `/var/tmp`), `/var/tmp` needs to be mounted and shared among them.
|
||||
|
||||
In some cases, the ion driver is not enabled (for example, due to a newer kernel version or iommu incompatibility). `lsmod | grep myd_ion` returns empty output. To resolve, use the following command:
|
||||
```sh
|
||||
docker run -it --rm --net=host -v /var/tmp:/var/tmp –ipc=host <image_name>
|
||||
docker run --rm --net=host -v /var/tmp:/var/tmp –ipc=host -ti <image_name>
|
||||
```
|
||||
> **NOTES**:
|
||||
> **NOTE**:
|
||||
> - When building docker images, create a user in the docker file that has the same UID and GID as the user which runs hddldaemon on the host.
|
||||
> - Run the application in the docker with this user.
|
||||
> - Alternatively, you can start hddldaemon with the root user on host, but this approach is not recommended.
|
||||
|
||||
### Run Demos in the Docker* Image
|
||||
|
||||
To run the Security Barrier Camera Demo on a specific inference device, run the following commands with the root privileges (additional third-party dependencies will be installed):
|
||||
|
||||
**CPU**:
|
||||
```sh
|
||||
docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d CPU -sample-options -no_show"
|
||||
```
|
||||
|
||||
**GPU**:
|
||||
```sh
|
||||
docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d GPU -sample-options -no_show"
|
||||
```
|
||||
|
||||
**MYRIAD**:
|
||||
```sh
|
||||
docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d MYRIAD -sample-options -no_show"
|
||||
```
|
||||
|
||||
**HDDL**:
|
||||
```sh
|
||||
docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp --device /dev/dri:/dev/dri --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d HDDL -sample-options -no_show"
|
||||
```
|
||||
|
||||
## Use a Docker* Image for FPGA
|
||||
|
||||
Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA.
|
||||
@@ -295,14 +291,12 @@ Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue t
|
||||
|
||||
For instructions for previous releases with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_installing_openvino_docker_linux.html#use_a_docker_image_for_fpga) or lower.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you got proxy issues, please setup proxy settings for Docker. See the Proxy section in the [Install the DL Workbench from Docker Hub* ](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) topic.
|
||||
## Examples
|
||||
* [ubuntu18_runtime dockerfile](https://docs.openvinotoolkit.org/downloads/ubuntu18_runtime.dockerfile) - Can be used to build OpenVINO™ runtime image containing minimal dependencies needed to use OpenVINO™ in production environment.
|
||||
* [ubuntu18_dev dockerfile](https://docs.openvinotoolkit.org/downloads/ubuntu18_dev.dockerfile) - Can be used to build OpenVINO™ developer image containing full OpenVINO™ package to use in development environment.
|
||||
|
||||
## Additional Resources
|
||||
|
||||
* [DockerHub CI Framework](https://github.com/openvinotoolkit/docker_ci) for Intel® Distribution of OpenVINO™ toolkit. The Framework can generate a Dockerfile, build, test, and deploy an image with the Intel® Distribution of OpenVINO™ toolkit. You can reuse available Dockerfiles, add your layer and customize the image of OpenVINO™ for your needs.
|
||||
|
||||
* Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
|
||||
|
||||
* OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org)
|
||||
|
||||
@@ -15,75 +15,140 @@ This guide provides the steps for creating a Docker* image with Intel® Distribu
|
||||
- Windows 10*, 64-bit Pro, Enterprise or Education (1607 Anniversary Update, Build 14393 or later) editions
|
||||
- Windows Server* 2016 or higher
|
||||
|
||||
## Prebuilt Images
|
||||
|
||||
Prebuilt images are available on [Docker Hub](https://hub.docker.com/u/openvino).
|
||||
|
||||
## Build a Docker* Image for CPU
|
||||
|
||||
You can use [available Dockerfiles](https://github.com/openvinotoolkit/docker_ci/tree/master/dockerfiles) or generate a Dockerfile with your setting via [DockerHub CI Framework](https://github.com/openvinotoolkit/docker_ci) for Intel® Distribution of OpenVINO™ toolkit.
|
||||
The Framework can generate a Dockerfile, build, test, and deploy an image with the Intel® Distribution of OpenVINO™ toolkit.
|
||||
To build a Docker image, create a `Dockerfile` that contains defined variables and commands required to create an OpenVINO toolkit installation image.
|
||||
|
||||
## Install Additional Dependencies
|
||||
Create your `Dockerfile` using the following example as a template:
|
||||
|
||||
<details>
|
||||
<summary>Click to expand/collapse</summary>
|
||||
|
||||
~~~
|
||||
# escape= `
|
||||
FROM mcr.microsoft.com/windows/servercore:ltsc2019
|
||||
|
||||
# Restore the default Windows shell for correct batch processing.
|
||||
SHELL ["cmd", "/S", "/C"]
|
||||
|
||||
USER ContainerAdministrator
|
||||
|
||||
# Setup Redistributable Libraries for Intel(R) C++ Compiler for Windows*
|
||||
|
||||
RUN powershell.exe -Command `
|
||||
Invoke-WebRequest -URI https://software.intel.com/sites/default/files/managed/59/aa/ww_icl_redist_msi_2018.3.210.zip -Proxy %HTTPS_PROXY% -OutFile "%TMP%\ww_icl_redist_msi_2018.3.210.zip" ; `
|
||||
Expand-Archive -Path "%TMP%\ww_icl_redist_msi_2018.3.210.zip" -DestinationPath "%TMP%\ww_icl_redist_msi_2018.3.210" -Force ; `
|
||||
Remove-Item "%TMP%\ww_icl_redist_msi_2018.3.210.zip" -Force
|
||||
|
||||
RUN %TMP%\ww_icl_redist_msi_2018.3.210\ww_icl_redist_intel64_2018.3.210.msi /quiet /passive /log "%TMP%\redist.log"
|
||||
|
||||
# setup Python
|
||||
ARG PYTHON_VER=python3.7
|
||||
|
||||
RUN powershell.exe -Command `
|
||||
Invoke-WebRequest -URI https://www.python.org/ftp/python/3.7.6/python-3.7.6-amd64.exe -Proxy %HTTPS_PROXY% -OutFile %TMP%\\python-3.7.exe ; `
|
||||
Start-Process %TMP%\\python-3.7.exe -ArgumentList '/passive InstallAllUsers=1 PrependPath=1 TargetDir=c:\\Python37' -Wait ; `
|
||||
Remove-Item %TMP%\\python-3.7.exe -Force
|
||||
|
||||
RUN python -m pip install --upgrade pip
|
||||
RUN python -m pip install cmake
|
||||
|
||||
# download package from external URL
|
||||
ARG package_url=http://registrationcenter-download.intel.com/akdlm/irc_nas/16613/w_openvino_toolkit_p_0000.0.000.exe
|
||||
ARG TEMP_DIR=/temp
|
||||
|
||||
WORKDIR ${TEMP_DIR}
|
||||
ADD ${package_url} ${TEMP_DIR}
|
||||
|
||||
# install product by installation script
|
||||
ARG build_id=0000.0.000
|
||||
ENV INTEL_OPENVINO_DIR C:\intel
|
||||
|
||||
RUN powershell.exe -Command `
|
||||
Start-Process "./*.exe" -ArgumentList '--s --a install --eula=accept --installdir=%INTEL_OPENVINO_DIR% --output=%TMP%\openvino_install_out.log --components=OPENVINO_COMMON,INFERENCE_ENGINE,INFERENCE_ENGINE_SDK,INFERENCE_ENGINE_SAMPLES,OMZ_TOOLS,POT,INFERENCE_ENGINE_CPU,INFERENCE_ENGINE_GPU,MODEL_OPTIMIZER,OMZ_DEV,OPENCV_PYTHON,OPENCV_RUNTIME,OPENCV,DOCS,SETUPVARS,VC_REDIST_2017_X64,icl_redist' -Wait
|
||||
|
||||
ENV INTEL_OPENVINO_DIR C:\intel\openvino_${build_id}
|
||||
|
||||
# Post-installation cleanup
|
||||
RUN rmdir /S /Q "%USERPROFILE%\Downloads\Intel"
|
||||
|
||||
# dev package
|
||||
WORKDIR ${INTEL_OPENVINO_DIR}
|
||||
RUN python -m pip install --no-cache-dir setuptools && `
|
||||
python -m pip install --no-cache-dir -r "%INTEL_OPENVINO_DIR%\python\%PYTHON_VER%\requirements.txt" && `
|
||||
python -m pip install --no-cache-dir -r "%INTEL_OPENVINO_DIR%\python\%PYTHON_VER%\openvino\tools\benchmark\requirements.txt" && `
|
||||
python -m pip install --no-cache-dir torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
|
||||
|
||||
WORKDIR ${TEMP_DIR}
|
||||
COPY scripts\install_requirements.bat install_requirements.bat
|
||||
RUN install_requirements.bat %INTEL_OPENVINO_DIR%
|
||||
|
||||
|
||||
WORKDIR ${INTEL_OPENVINO_DIR}\deployment_tools\open_model_zoo\tools\accuracy_checker
|
||||
RUN %INTEL_OPENVINO_DIR%\bin\setupvars.bat && `
|
||||
python -m pip install --no-cache-dir -r "%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\accuracy_checker\requirements.in" && `
|
||||
python "%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\accuracy_checker\setup.py" install
|
||||
|
||||
WORKDIR ${INTEL_OPENVINO_DIR}\deployment_tools\tools\post_training_optimization_toolkit
|
||||
RUN python -m pip install --no-cache-dir -r "%INTEL_OPENVINO_DIR%\deployment_tools\tools\post_training_optimization_toolkit\requirements.txt" && `
|
||||
python "%INTEL_OPENVINO_DIR%\deployment_tools\tools\post_training_optimization_toolkit\setup.py" install
|
||||
|
||||
WORKDIR ${INTEL_OPENVINO_DIR}
|
||||
|
||||
# Post-installation cleanup
|
||||
RUN powershell Remove-Item -Force -Recurse "%TEMP%\*" && `
|
||||
powershell Remove-Item -Force -Recurse "%TEMP_DIR%" && `
|
||||
rmdir /S /Q "%ProgramData%\Package Cache"
|
||||
|
||||
USER ContainerUser
|
||||
|
||||
CMD ["cmd.exe"]
|
||||
~~~
|
||||
|
||||
</details>
|
||||
|
||||
> **NOTE**: Replace direct link to the Intel® Distribution of OpenVINO™ toolkit package to the latest version in the `package_url` variable and modify install package name in the subsequent commands. You can copy the link from the [Intel® Distribution of OpenVINO™ toolkit download page](https://software.seek.intel.com/openvino-toolkit) after registration. Right click the **Offline Installer** button on the download page for Linux in your browser and press **Copy link address**.
|
||||
> **NOTE**: Replace build number of the package in the `build_id` variable according to the name of the downloaded Intel® Distribution of OpenVINO™ toolkit package. For example, for the installation file `w_openvino_toolkit_p_2020.3.333.exe`, the `build_id` variable should have the value `2020.3.333`.
|
||||
|
||||
To build a Docker* image for CPU, run the following command:
|
||||
~~~
|
||||
docker build . -t <image_name> `
|
||||
--build-arg HTTP_PROXY=<http://your_proxy_server.com:port> `
|
||||
--build-arg HTTPS_PROXY=<https://your_proxy_server.com:port>
|
||||
~~~
|
||||
|
||||
## Install additional dependencies
|
||||
### Install CMake
|
||||
To add CMake to the image, add the following commands to the Dockerfile:
|
||||
To add CMake to the image, add the following commands to the `Dockerfile` example above:
|
||||
~~~
|
||||
RUN powershell.exe -Command `
|
||||
Invoke-WebRequest -URI https://cmake.org/files/v3.14/cmake-3.14.7-win64-x64.msi -OutFile %TMP%\\cmake-3.14.7-win64-x64.msi ; `
|
||||
Invoke-WebRequest -URI https://cmake.org/files/v3.14/cmake-3.14.7-win64-x64.msi -Proxy %HTTPS_PROXY% -OutFile %TMP%\\cmake-3.14.7-win64-x64.msi ; `
|
||||
Start-Process %TMP%\\cmake-3.14.7-win64-x64.msi -ArgumentList '/quiet /norestart' -Wait ; `
|
||||
Remove-Item %TMP%\\cmake-3.14.7-win64-x64.msi -Force
|
||||
|
||||
RUN SETX /M PATH "C:\Program Files\CMake\Bin;%PATH%"
|
||||
~~~
|
||||
In case of proxy issues, please add the `ARG HTTPS_PROXY` and `-Proxy %HTTPS_PROXY%` settings to the `powershell.exe` command in the Dockerfile. Then build a docker image:
|
||||
~~~
|
||||
docker build . -t <image_name> `
|
||||
--build-arg HTTPS_PROXY=<https://your_proxy_server:port>
|
||||
~~~
|
||||
|
||||
### Install Microsoft Visual Studio* Build Tools
|
||||
You can add Microsoft Visual Studio Build Tools* to a Windows* OS Docker image. Available options are to use offline installer for Build Tools
|
||||
(follow the [Instruction for the offline installer](https://docs.microsoft.com/en-us/visualstudio/install/create-an-offline-installation-of-visual-studio?view=vs-2019)) or
|
||||
to use the online installer for Build Tools (follow [Instruction for the online installer](https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container?view=vs-2019)).
|
||||
You can add Microsoft Visual Studio Build Tools* to Windows* OS Docker image. Available options are to use offline installer for Build Tools
|
||||
(follow [Instruction for the offline installer](https://docs.microsoft.com/en-us/visualstudio/install/create-an-offline-installation-of-visual-studio?view=vs-2019) or
|
||||
to use online installer for Build Tools (follow [Instruction for the online installer](https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container?view=vs-2019).
|
||||
Microsoft Visual Studio Build Tools* are licensed as a supplement to your existing Microsoft Visual Studio* license.
|
||||
Any images built with these tools should be for your personal use or for use in your organization in accordance with your existing Visual Studio* and Windows* licenses.
|
||||
|
||||
To add MSBuild 2019 to the image, add the following commands to the Dockerfile:
|
||||
~~~
|
||||
RUN powershell.exe -Command Invoke-WebRequest -URI https://aka.ms/vs/16/release/vs_buildtools.exe -OutFile %TMP%\\vs_buildtools.exe
|
||||
|
||||
RUN %TMP%\\vs_buildtools.exe --quiet --norestart --wait --nocache `
|
||||
--installPath "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools" `
|
||||
--add Microsoft.VisualStudio.Workload.MSBuildTools `
|
||||
--add Microsoft.VisualStudio.Workload.UniversalBuildTools `
|
||||
--add Microsoft.VisualStudio.Workload.VCTools --includeRecommended `
|
||||
--remove Microsoft.VisualStudio.Component.Windows10SDK.10240 `
|
||||
--remove Microsoft.VisualStudio.Component.Windows10SDK.10586 `
|
||||
--remove Microsoft.VisualStudio.Component.Windows10SDK.14393 `
|
||||
--remove Microsoft.VisualStudio.Component.Windows81SDK || IF "%ERRORLEVEL%"=="3010" EXIT 0 && powershell set-executionpolicy remotesigned
|
||||
~~~
|
||||
In case of proxy issues, please use an offline installer for Build Tools (follow [Instruction for the offline installer](https://docs.microsoft.com/en-us/visualstudio/install/create-an-offline-installation-of-visual-studio?view=vs-2019).
|
||||
|
||||
## Run the Docker* Image for CPU
|
||||
|
||||
To install the OpenVINO toolkit from the prepared Docker image, run the image with the following command (currently support only CPU target):
|
||||
To install the OpenVINO toolkit from the prepared Docker image, run the image with the following command:
|
||||
~~~
|
||||
docker run -it --rm <image_name>
|
||||
docker run -it <image_name>
|
||||
~~~
|
||||
|
||||
If you want to try some demos, run the image with root privileges (some additional third-party dependencies will be installed):
|
||||
~~~
|
||||
docker run -itu ContainerAdministrator --rm <image_name> cmd /S /C "cd deployment_tools\demo && demo_security_barrier_camera.bat -d CPU -sample-options -no_show"
|
||||
~~~
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you got proxy issues, please setup proxy settings for Docker. See the Proxy section in the [Install the DL Workbench from Docker Hub* ](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) topic.
|
||||
## Examples
|
||||
* [winserver2019_runtime dockerfile](https://docs.openvinotoolkit.org/downloads/winserver2019_runtime.dockerfile) - Can be used to build OpenVINO™ runtime image containing minimal dependencies needed to use OpenVINO™ in production environment.
|
||||
* [winserver2019_dev dockerfile](https://docs.openvinotoolkit.org/downloads/winserver2019_dev.dockerfile) - Can be used to build OpenVINO™ developer image containing full OpenVINO™ package to use in development environment.
|
||||
|
||||
## Additional Resources
|
||||
|
||||
* [DockerHub CI Framework](https://github.com/openvinotoolkit/docker_ci) for Intel® Distribution of OpenVINO™ toolkit. The Framework can generate a Dockerfile, build, test, and deploy an image with the Intel® Distribution of OpenVINO™ toolkit. You can reuse available Dockerfiles, add your layer and customize the image of OpenVINO™ for your needs.
|
||||
|
||||
* Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
|
||||
|
||||
* OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org)
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
# Install From Images and Repositories {#openvino_docs_install_guides_installing_openvino_images}
|
||||
|
||||
You may install Intel® Distribution of OpenVINO™ toolkit from images and repositories using the **Install OpenVINO™** button above or directly from the [Get the Intel® Distribution of OpenVINO™ Toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/download.html) page. Use the documentation below if you need additional support:
|
||||
|
||||
* [Docker](installing-openvino-docker-linux.md)
|
||||
* [Docker with DL Workbench](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub)
|
||||
* [APT](installing-openvino-apt.md)
|
||||
* [YUM](installing-openvino-yum.md)
|
||||
* [Anaconda Cloud](installing-openvino-conda.md)
|
||||
* [Yocto](installing-openvino-yocto.md)
|
||||
* [PyPI](installing-openvino-pip.md)
|
||||
|
||||
The open source version is available in the [OpenVINO™ toolkit GitHub repository](https://github.com/openvinotoolkit/openvino) and you can build it for supported platforms using the <a href="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode">Inference Engine Build Instructions</a>.
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
|
||||
## Introduction
|
||||
|
||||
OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that solve a variety of tasks including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and many others. Based on latest generations of artificial neural networks, including Convolutional Neural Networks (CNNs), recurrent and attention-based networks, the toolkit extends computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance, AI and deep learning inference deployed from edge to cloud.
|
||||
The Intel® Distribution of OpenVINO™ toolkit quickly deploys applications and solutions that emulate human vision. Based on Convolutional Neural Networks (CNN), the toolkit extends computer vision (CV) workloads across Intel® hardware, maximizing performance. The Intel® Distribution of OpenVINO™ toolkit includes the Intel® Deep Learning Deployment Toolkit (Intel® DLDT).
|
||||
|
||||
The Intel® Distribution of OpenVINO™ toolkit for Linux\*:
|
||||
- Enables CNN-based deep learning inference on the edge
|
||||
@@ -28,21 +28,7 @@ The Intel® Distribution of OpenVINO™ toolkit for Linux\*:
|
||||
| [Inference Engine Code Samples](../IE_DG/Samples_Overview.md) | A set of simple console applications demonstrating how to utilize specific OpenVINO capabilities in an application and how to perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more. |
|
||||
| [Demo Applications](@ref omz_demos_README) | A set of simple console applications that provide robust application templates to help you implement specific deep learning scenarios. |
|
||||
| Additional Tools | A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other |
|
||||
| [Documentation for Pre-Trained Models ](@ref omz_models_intel_index) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/opencv/open_model_zoo). |
|
||||
| Deep Learning Streamer (DL Streamer) | Streaming analytics framework, based on GStreamer, for constructing graphs of media analytics components. For the DL Streamer documentation, see [DL Streamer Samples](@ref gst_samples_README), [API Reference](https://openvinotoolkit.github.io/dlstreamer_gst/), [Elements](https://github.com/opencv/gst-video-analytics/wiki/Elements), [Tutorial](https://github.com/opencv/gst-video-analytics/wiki/DL%20Streamer%20Tutorial). |
|
||||
|
||||
**Could Be Optionally Installed**
|
||||
|
||||
[Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare performance of deep learning models on various Intel® architecture
|
||||
configurations. In the DL Workbench, you can use most of OpenVINO™ toolkit components:
|
||||
* [Model Downloader](@ref omz_tools_downloader_README)
|
||||
* [Intel® Open Model Zoo](@ref omz_models_intel_index)
|
||||
* [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
|
||||
* [Post-training Optimization Tool](@ref pot_README)
|
||||
* [Accuracy Checker](@ref omz_tools_accuracy_checker_README)
|
||||
* [Benchmark Tool](../../inference-engine/samples/benchmark_app/README.md)
|
||||
|
||||
Proceed to an [easy installation from Docker](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) to get started.
|
||||
| [Documentation for Pre-Trained Models ](@ref omz_models_intel_index) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/opencv/open_model_zoo) |
|
||||
|
||||
## System Requirements
|
||||
|
||||
@@ -98,25 +84,28 @@ If you downloaded the package file to the current user's `Downloads` directory:
|
||||
```sh
|
||||
cd ~/Downloads/
|
||||
```
|
||||
By default, the file is saved as `l_openvino_toolkit_p_<version>.tgz`.
|
||||
By default, the file is saved as `l_openvino_toolkit_p_<version>.tgz`.
|
||||
|
||||
3. Unpack the .tgz file:
|
||||
```sh
|
||||
tar -xvzf l_openvino_toolkit_p_<version>.tgz
|
||||
```
|
||||
The files are unpacked to the `l_openvino_toolkit_p_<version>` directory.
|
||||
The files are unpacked to the `l_openvino_toolkit_p_<version>` directory.
|
||||
|
||||
4. Go to the `l_openvino_toolkit_p_<version>` directory:
|
||||
```sh
|
||||
cd l_openvino_toolkit_p_<version>
|
||||
```
|
||||
If you have a previous version of the Intel Distribution of OpenVINO
|
||||
If you have a previous version of the Intel Distribution of OpenVINO
|
||||
toolkit installed, rename or delete these two directories:
|
||||
- `~/inference_engine_samples_build`
|
||||
- `~/openvino_models`
|
||||
|
||||
**Installation Notes:**
|
||||
- Choose an installation option and run the related script as root.
|
||||
- You can use either a GUI installation wizard or command-line instructions (CLI).
|
||||
- Screenshots are provided for the GUI, but not for CLI. The following information also applies to CLI and will be helpful to your installation where you will be presented with the same choices and tasks.
|
||||
**Installation Notes:**
|
||||
|
||||
- Choose an installation option and run the related script as root.
|
||||
- You can use either a GUI installation wizard or command-line instructions (CLI).
|
||||
- Screenshots are provided for the GUI, but not for CLI. The following information also applies to CLI and will be helpful to your installation where you will be presented with the same choices and tasks.
|
||||
|
||||
5. Choose your installation option:
|
||||
- **Option 1:** GUI Installation Wizard:
|
||||
@@ -127,15 +116,6 @@ sudo ./install_GUI.sh
|
||||
```sh
|
||||
sudo ./install.sh
|
||||
```
|
||||
- **Option 3:** Command-Line Silent Instructions:
|
||||
```sh
|
||||
sudo sed -i 's/decline/accept/g' silent.cfg
|
||||
sudo ./install.sh -s silent.cfg
|
||||
```
|
||||
You can select which OpenVINO components will be installed by modifying the `COMPONENTS` parameter in the `silent.cfg` file. For example, to install only CPU runtime for the Inference Engine, set
|
||||
`COMPONENTS=intel-openvino-ie-rt-cpu__x86_64` in `silent.cfg`.
|
||||
To get a full list of available components for installation, run the `./install.sh --list_components` command from the unpacked OpenVINO™ toolkit package.
|
||||
|
||||
6. Follow the instructions on your screen. Watch for informational
|
||||
messages such as the following in case you must complete additional
|
||||
steps:
|
||||
@@ -175,7 +155,7 @@ cd /opt/intel/openvino_2021/install_dependencies
|
||||
```sh
|
||||
sudo -E ./install_openvino_dependencies.sh
|
||||
```
|
||||
The dependencies are installed. Continue to the next section to set your environment variables.
|
||||
The dependencies are installed. Continue to the next section to set your environment variables.
|
||||
|
||||
## <a name="set-the-environment-variables"></a>Set the Environment Variables
|
||||
|
||||
@@ -298,18 +278,20 @@ cd /opt/intel/openvino_2021/deployment_tools/demo
|
||||
```sh
|
||||
./demo_squeezenet_download_convert_run.sh
|
||||
```
|
||||
This verification script downloads a SqueezeNet model, uses the Model Optimizer to convert the model to the .bin and .xml Intermediate Representation (IR) files. The Inference Engine requires this model conversion so it can use the IR as input and achieve optimum performance on Intel hardware.<br>
|
||||
This verification script builds the [Image Classification Sample Async](../../inference-engine/samples/classification_sample_async/README.md) application and runs it with the `car.png` image located in the demo directory. When the verification script completes, you will have the label and confidence for the top-10 categories:
|
||||

|
||||
This verification script downloads a SqueezeNet model, uses the Model Optimizer to convert the model to the .bin and .xml Intermediate Representation (IR) files. The Inference Engine requires this model conversion so it can use the IR as input and achieve optimum performance on Intel hardware.<br>
|
||||
This verification script builds the [Image Classification Sample Async](../../inference-engine/samples/classification_sample_async/README.md) application and runs it with the `car.png` image located in the demo directory. When the verification script completes, you will have the label and confidence for the top-10 categories:
|
||||

|
||||
|
||||
3. Run the **Inference Pipeline verification script**:
|
||||
```sh
|
||||
./demo_security_barrier_camera.sh
|
||||
```
|
||||
This script downloads three pre-trained model IRs, builds the [Security Barrier Camera Demo](@ref omz_demos_security_barrier_camera_demo_README) application, and runs it with the downloaded models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline. The verification script uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute.<br>
|
||||
First, an object is identified as a vehicle. This identification is used as input to the next model, which identifies specific vehicle attributes, including the license plate. Finally, the attributes identified as the license plate are used as input to the third model, which recognizes specific characters in the license plate.<br>
|
||||
When the verification script completes, you will see an image that displays the resulting frame with detections rendered as bounding boxes, and text:
|
||||

|
||||
This script downloads three pre-trained model IRs, builds the [Security Barrier Camera Demo](@ref omz_demos_security_barrier_camera_demo_README) application, and runs it with the downloaded models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline. The verification script uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute.
|
||||
|
||||
First, an object is identified as a vehicle. This identification is used as input to the next model, which identifies specific vehicle attributes, including the license plate. Finally, the attributes identified as the license plate are used as input to the third model, which recognizes specific characters in the license plate.
|
||||
|
||||
When the verification script completes, you will see an image that displays the resulting frame with detections rendered as bounding boxes, and text:
|
||||

|
||||
|
||||
4. Close the image viewer window to complete the verification script.
|
||||
|
||||
@@ -340,15 +322,20 @@ sudo -E su
|
||||
```sh
|
||||
./install_NEO_OCL_driver.sh
|
||||
```
|
||||
The drivers are not included in the package and the script downloads them. Make sure you have the internet connection for this step.<br>
|
||||
The script compares the driver version on the system to the current version. If the driver version on the system is higher or equal to the current version, the script does
|
||||
not install a new driver. If the version of the driver is lower than the current version, the script uninstalls the lower and installs the current version with your permission:
|
||||

|
||||
Higher hardware versions require a higher driver version, namely 20.35 instead of 19.41. If the script fails to uninstall the driver, uninstall it manually. During the script execution, you may see the following command line output:
|
||||
```sh
|
||||
Add OpenCL user to video group
|
||||
```
|
||||
Ignore this suggestion and continue.
|
||||
The drivers are not included in the package and the script downloads them. Make sure you have the
|
||||
internet connection for this step.
|
||||
|
||||
The script compares the driver version on the system to the current version.
|
||||
If the driver version on the system is higher or equal to the current version, the script does
|
||||
not install a new driver.
|
||||
If the version of the driver is lower than the current version, the script uninstalls the lower
|
||||
and installs the current version with your permission:
|
||||

|
||||
Higher hardware versions require a higher driver version, namely 20.35 instead of 19.41.
|
||||
If the script fails to uninstall the driver, uninstall it manually.
|
||||
During the script execution, you may see the following command line output:
|
||||
- Add OpenCL user to video group
|
||||
Ignore this suggestion and continue.
|
||||
4. **Optional** Install header files to allow compiling a new code. You can find the header files at [Khronos OpenCL™ API Headers](https://github.com/KhronosGroup/OpenCL-Headers.git).
|
||||
|
||||
## <a name="additional-NCS-steps"></a>Steps for Intel® Neural Compute Stick 2
|
||||
@@ -359,7 +346,8 @@ These steps are only required if you want to perform inference on Intel® Movidi
|
||||
```sh
|
||||
sudo usermod -a -G users "$(whoami)"
|
||||
```
|
||||
Log out and log in for it to take effect.
|
||||
Log out and log in for it to take effect.
|
||||
|
||||
2. To perform inference on Intel® Neural Compute Stick 2, install the USB rules as follows:
|
||||
```sh
|
||||
sudo cp /opt/intel/openvino_2021/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/
|
||||
|
||||
@@ -31,19 +31,6 @@ The following components are installed by default:
|
||||
| Additional Tools | A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other |
|
||||
| [Documentation for Pre-Trained Models ](@ref omz_models_intel_index) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/opencv/open_model_zoo) |
|
||||
|
||||
**Could Be Optionally Installed**
|
||||
|
||||
[Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare performance of deep learning models on various Intel® architecture
|
||||
configurations. In the DL Workbench, you can use most of OpenVINO™ toolkit components:
|
||||
* [Model Downloader](@ref omz_tools_downloader_README)
|
||||
* [Intel® Open Model Zoo](@ref omz_models_intel_index)
|
||||
* [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
|
||||
* [Post-training Optimization Tool](@ref pot_README)
|
||||
* [Accuracy Checker](@ref omz_tools_accuracy_checker_README)
|
||||
* [Benchmark Tool](../../inference-engine/samples/benchmark_app/README.md)
|
||||
|
||||
Proceed to an [easy installation from Docker](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) to get started.
|
||||
|
||||
## Development and Target Platform
|
||||
|
||||
The development and target platforms have the same requirements, but you can select different components during the installation, based on your intended use.
|
||||
|
||||
@@ -13,64 +13,55 @@ This guide provides installation steps for the Intel® distribution of OpenVINO
|
||||
|
||||
## Install the Runtime Package Using the PyPI Repository
|
||||
|
||||
### Step 1. Set up and update pip to the highest version
|
||||
|
||||
Run the command below:
|
||||
```sh
|
||||
python3 -m pip install --upgrade pip
|
||||
```
|
||||
|
||||
### Step 2. Install the Intel® distribution of OpenVINO™ toolkit
|
||||
|
||||
Run the command below:
|
||||
1. Set up and update pip to the highest version:
|
||||
```sh
|
||||
python3 -m pip install --upgrade pip
|
||||
```
|
||||
2. Install the Intel® distribution of OpenVINO™ toolkit:
|
||||
```sh
|
||||
pip install openvino-python
|
||||
```
|
||||
|
||||
### Step 3. Add PATH to environment variables
|
||||
|
||||
Run a command for your operating system:
|
||||
- Ubuntu 18.04 and macOS:
|
||||
```sh
|
||||
export LD_LIBRARY_PATH=<library_dir>:${LD_LIBRARY_PATH}
|
||||
```
|
||||
- Windows* 10:
|
||||
```sh
|
||||
set PATH=<library_dir>;%PATH%
|
||||
```
|
||||
To find `library_dir`:
|
||||
**Ubuntu, macOS**:
|
||||
- Standard user:
|
||||
```sh
|
||||
echo $(python3 -m site --user-base)/lib
|
||||
```
|
||||
- Root or sudo user:
|
||||
```sh
|
||||
/usr/local/lib
|
||||
```
|
||||
- Virtual environments or custom Python installations (from sources or tarball):
|
||||
```sh
|
||||
echo $(which python3)/../../lib
|
||||
```
|
||||
**Windows**:
|
||||
- Standard Python:
|
||||
```sh
|
||||
python -c "import os, sys; print((os.path.dirname(sys.executable))+'\Library\\bin')"
|
||||
```
|
||||
- Virtual environments or custom Python installations (from sources or tarball):
|
||||
```sh
|
||||
python -c "import os, sys; print((os.path.dirname(sys.executable))+'\..\Library\\bin')"
|
||||
```
|
||||
|
||||
### Step 4. Verify that the package is installed
|
||||
|
||||
Run the command below:
|
||||
```sh
|
||||
python3 -c "import openvino"
|
||||
```
|
||||
3. Add PATH to environment variables.
|
||||
- Ubuntu* 18.04 and macOS*:
|
||||
```sh
|
||||
export LD_LIBRARY_PATH=<library_dir>:${LD_LIBRARY_PATH}
|
||||
```
|
||||
- Windows* 10:
|
||||
```sh
|
||||
set PATH=<library_dir>;%PATH%
|
||||
```
|
||||
How to find `library_dir`:
|
||||
- Ubuntu\*, macOS\*:
|
||||
- Standard user:
|
||||
```sh
|
||||
echo $(python3 -m site --user-base)/lib
|
||||
```
|
||||
- Root or sudo user:
|
||||
```sh
|
||||
/usr/local/lib
|
||||
```
|
||||
- Virtual environments or custom Python installations (from sources or tarball):
|
||||
```sh
|
||||
echo $(which python3)/../../lib
|
||||
```
|
||||
- Windows\*:
|
||||
- Standard Python:
|
||||
```sh
|
||||
python -c "import os, sys; print((os.path.dirname(sys.executable))+'\Library\\bin')"
|
||||
```
|
||||
- Virtual environments or custom Python installations (from sources or tarball):
|
||||
```sh
|
||||
python -c "import os, sys; print((os.path.dirname(sys.executable))+'\..\Library\\bin')"
|
||||
```
|
||||
4. Verify that the package is installed:
|
||||
```sh
|
||||
python3 -c "import openvino"
|
||||
```
|
||||
|
||||
Now you are ready to develop and run your application.
|
||||
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [Intel® Distribution of OpenVINO™ toolkit](https://software.intel.com/en-us/openvino-toolkit).
|
||||
|
||||
@@ -63,19 +63,6 @@ The following components are installed by default:
|
||||
| Additional Tools | A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other |
|
||||
| [Documentation for Pre-Trained Models ](@ref omz_models_intel_index) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/opencv/open_model_zoo) |
|
||||
|
||||
**Could Be Optionally Installed**
|
||||
|
||||
[Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare performance of deep learning models on various Intel® architecture
|
||||
configurations. In the DL Workbench, you can use most of OpenVINO™ toolkit components:
|
||||
* [Model Downloader](@ref omz_tools_downloader_README)
|
||||
* [Intel® Open Model Zoo](@ref omz_models_intel_index)
|
||||
* [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
|
||||
* [Post-training Optimization Tool](@ref pot_README)
|
||||
* [Accuracy Checker](@ref omz_tools_accuracy_checker_README)
|
||||
* [Benchmark Tool](../../inference-engine/samples/benchmark_app/README.md)
|
||||
|
||||
Proceed to an [easy installation from Docker](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) to get started.
|
||||
|
||||
### System Requirements
|
||||
|
||||
**Hardware**
|
||||
|
||||
@@ -1,143 +0,0 @@
|
||||
# OpenVINO™ Model Server {#openvino_docs_ovms}
|
||||
|
||||
OpenVINO™ Model Server (OVMS) is a scalable, high-performance solution for serving machine learning models optimized for Intel® architectures.
|
||||
The server provides an inference service via gRPC or REST API - making it easy to deploy new algorithms and AI experiments using the same
|
||||
architecture as [TensorFlow* Serving](https://github.com/tensorflow/serving) for any models trained in a framework that is supported
|
||||
by [OpenVINO](https://software.intel.com/en-us/openvino-toolkit).
|
||||
|
||||
The server implements gRPC and REST API framework with data serialization and deserialization using TensorFlow Serving API,
|
||||
and OpenVINO™ as the inference execution provider. Model repositories may reside on a locally accessible file system (for example, NFS),
|
||||
Google Cloud Storage\* (GCS), Amazon S3\*, MinIO\*, or Azure Blob Storage\*.
|
||||
|
||||
OVMS is now implemented in C++ and provides much higher scalability compared to its predecessor in the Python version.
|
||||
You can take advantage of all the power of Xeon® CPU capabilities or AI accelerators and expose it over the network interface.
|
||||
Read the [release notes](https://github.com/openvinotoolkit/model_server/releases) to find out what's new in the C++ version.
|
||||
|
||||
Review the [Architecture Concept](https://github.com/openvinotoolkit/model_server/blob/main/docs/architecture.md) document for more details.
|
||||
|
||||
A few key features:
|
||||
- Support for multiple frameworks. Serve models trained in popular formats such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX*.
|
||||
- Deploy new [model versions](https://github.com/openvinotoolkit/model_server/blob/main/docs/docker_container.md#model-version-policy) without changing client code.
|
||||
- Support for AI accelerators including [Intel Movidius Myriad VPUs](../IE_DG/supported_plugins/VPU),
|
||||
[GPU](../IE_DG/supported_plugins/CL_DNN), and [HDDL](../IE_DG/supported_plugins/HDDL).
|
||||
- The server can be enabled both on [Bare Metal Hosts](https://github.com/openvinotoolkit/model_server/blob/main/docs/host.md) or in
|
||||
[Docker* containers](https://github.com/openvinotoolkit/model_server/blob/main/docs/docker_container.md).
|
||||
- [Kubernetes deployments](https://github.com/openvinotoolkit/model_server/blob/main/deploy). The server can be deployed in a Kubernetes cluster allowing the inference service to scale horizontally and ensure high availability.
|
||||
- [Model reshaping](https://github.com/openvinotoolkit/model_server/blob/main/docs/docker_container.md#model-reshaping). The server supports reshaping models in runtime.
|
||||
- [Model ensemble](https://github.com/openvinotoolkit/model_server/blob/main/docs/ensemble_scheduler.md) (preview). Connect multiple models to deploy complex processing solutions and reduce overhead of sending data back and forth.
|
||||
|
||||
> **NOTE**: OVMS has been tested on CentOS\* and Ubuntu\*. Publicly released [Docker images](https://hub.docker.com/r/openvino/model_server) are based on CentOS.
|
||||
|
||||
## Build OpenVINO Model Server
|
||||
|
||||
1. Go to the root directory of the repository.
|
||||
|
||||
2. Build the Docker image with the command below:
|
||||
```bash
|
||||
make docker_build
|
||||
```
|
||||
|
||||
The command generates:
|
||||
* Image tagged as `openvino/model_server:latest` with CPU, NCS, and HDDL support
|
||||
* Image tagged as `openvino/model_server:latest-gpu` with CPU, NCS, HDDL, and iGPU support
|
||||
* `.tar.gz` release package with OVMS binary and necessary libraries in the `./dist` directory.
|
||||
|
||||
The release package is compatible with Linux machines on which `glibc` version is greater than or equal to the build image version.
|
||||
For debugging, the command also generates an image with a suffix `-build`, namely `openvino/model_server-build:latest`.
|
||||
|
||||
> **NOTE**: Images include OpenVINO 2021.1 release.
|
||||
|
||||
|
||||
## Run OpenVINO Model Server
|
||||
|
||||
Find a detailed description of how to use the OpenVINO Model Server in the [OVMS Quickstart](https://github.com/openvinotoolkit/model_server/blob/main/docs/ovms_quickstart.md).
|
||||
|
||||
|
||||
For more detailed guides on using the Model Server in various scenarios, visit the links below:
|
||||
|
||||
* [Models repository configuration](https://github.com/openvinotoolkit/model_server/blob/main/docs/models_repository.md)
|
||||
|
||||
* [Using a Docker container](https://github.com/openvinotoolkit/model_server/blob/main/docs/docker_container.md)
|
||||
|
||||
* [Landing on bare metal or virtual machine](https://github.com/openvinotoolkit/model_server/blob/main/docs/host.md)
|
||||
|
||||
* [Performance tuning](https://github.com/openvinotoolkit/model_server/blob/main/docs/performance_tuning.md)
|
||||
|
||||
* [Model Ensemble Scheduler](https://github.com/openvinotoolkit/model_server/blob/main/docs/ensemble_scheduler.md)
|
||||
|
||||
|
||||
## API Documentation
|
||||
|
||||
### GRPC
|
||||
|
||||
OpenVINO™ Model Server gRPC API is documented in the proto buffer files in [tensorflow_serving_api](https://github.com/tensorflow/serving/tree/r2.2/tensorflow_serving/apis).
|
||||
|
||||
> **NOTE:** The implementations for `Predict`, `GetModelMetadata`, and `GetModelStatus` function calls are currently available.
|
||||
> These are the most generic function calls and should address most of the usage scenarios.
|
||||
|
||||
[Predict proto](https://github.com/tensorflow/serving/blob/r2.2/tensorflow_serving/apis/predict.proto) defines two message specifications: `PredictRequest` and `PredictResponse` used while calling Prediction endpoint.
|
||||
* `PredictRequest` specifies information about the model spec, that is name and version, and a map of input data serialized via
|
||||
[TensorProto](https://github.com/tensorflow/tensorflow/blob/r2.2/tensorflow/core/framework/tensor.proto) to a string format.
|
||||
* `PredictResponse` includes a map of outputs serialized by
|
||||
[TensorProto](https://github.com/tensorflow/tensorflow/blob/r2.2/tensorflow/core/framework/tensor.proto) and information about the used model spec.
|
||||
|
||||
[Get Model Metadata proto](https://github.com/tensorflow/serving/blob/r2.2/tensorflow_serving/apis/get_model_metadata.proto) defines three message definitions used while calling Metadata endpoint:
|
||||
`SignatureDefMap`, `GetModelMetadataRequest`, `GetModelMetadataResponse`.
|
||||
|
||||
A function call `GetModelMetadata` accepts model spec information as input and returns Signature Definition content in the format similar to TensorFlow Serving.
|
||||
|
||||
[Get Model Status proto](https://github.com/tensorflow/serving/blob/r2.2/tensorflow_serving/apis/get_model_status.proto) defines three message definitions used while calling Status endpoint:
|
||||
`GetModelStatusRequest`, `ModelVersionStatus`, `GetModelStatusResponse` that report all exposed versions including their state in their lifecycle.
|
||||
|
||||
Refer to the [example client code](https://github.com/openvinotoolkit/model_server/blob/main/example_client) to learn how to use this API and submit the requests using the gRPC interface.
|
||||
|
||||
Using the gRPC interface is recommended for optimal performance due to its faster implementation of input data deserialization. It enables you to achieve lower latency, especially with larger input messages like images.
|
||||
|
||||
### REST
|
||||
|
||||
OpenVINO™ Model Server RESTful API follows the documentation from the [TensorFlow Serving REST API](https://www.tensorflow.org/tfx/serving/api_rest).
|
||||
|
||||
Both row and column format of the requests are implemented.
|
||||
|
||||
> **NOTE**: Just like with gRPC, only the implementations for `Predict`, `GetModelMetadata`, and `GetModelStatus` function calls are currently available.
|
||||
|
||||
Only the numerical data types are supported.
|
||||
|
||||
Review the example clients below to learn more about how to connect and run inference requests.
|
||||
|
||||
REST API is recommended when the primary goal is to reduce the number of client-side Python dependencies and simplify application code.
|
||||
|
||||
|
||||
## Known Limitations
|
||||
|
||||
* Currently, `Predict`, `GetModelMetadata`, and `GetModelStatus` calls are implemented using the TensorFlow Serving API.
|
||||
* `Classify`, `Regress`, and `MultiInference` are not included.
|
||||
* `Output_filter` is not effective in the `Predict` call. All outputs defined in the model are returned to the clients.
|
||||
|
||||
## OpenVINO Model Server Contribution Policy
|
||||
|
||||
* All contributed code must be compatible with the [Apache 2](https://www.apache.org/licenses/LICENSE-2.0) license.
|
||||
|
||||
* All changes have to pass linter, unit, and functional tests.
|
||||
|
||||
* All new features need to be covered by tests.
|
||||
|
||||
|
||||
## References
|
||||
|
||||
* [Speed and Scale AI Inference Operations Across Multiple Architectures - webinar recording](https://techdecoded.intel.io/essentials/speed-and-scale-ai-inference-operations-across-multiple-architectures/)
|
||||
|
||||
* [OpenVINO™](https://software.intel.com/en-us/openvino-toolkit)
|
||||
|
||||
* [TensorFlow Serving](https://github.com/tensorflow/serving)
|
||||
|
||||
* [gRPC](https://grpc.io/)
|
||||
|
||||
* [RESTful API](https://restfulapi.net/)
|
||||
|
||||
* [Inference at Scale in Kubernetes](https://www.intel.ai/inference-at-scale-in-kubernetes)
|
||||
|
||||
|
||||
|
||||
---
|
||||
\* Other names and brands may be claimed as the property of others.
|
||||
@@ -9,9 +9,9 @@
|
||||
**Detailed description**: For each element from the input tensor calculates corresponding
|
||||
element in the output tensor with the following formula:
|
||||
|
||||
\f[
|
||||
HSwish(x) = x \frac{min(max(x + 3, 0), 6)}{6}
|
||||
\f]
|
||||
\f[
|
||||
HSwish(x) = x \frac{min(max(x + 3, 0), 6)}{6}
|
||||
\f]
|
||||
|
||||
The HSwish operation is introduced in the following [article](https://arxiv.org/pdf/1905.02244.pdf).
|
||||
|
||||
|
||||
@@ -9,9 +9,9 @@
|
||||
**Detailed description**: For each element from the input tensor calculates corresponding
|
||||
element in the output tensor with the following formula:
|
||||
|
||||
\f[
|
||||
SoftPlus(x) = ln(e^{x} + 1.0)
|
||||
\f]
|
||||
\f[
|
||||
SoftPlus(x) = ln(e^{x} + 1.0)
|
||||
\f]
|
||||
|
||||
**Attributes**: *SoftPlus* operation has no attributes.
|
||||
|
||||
|
||||
@@ -78,9 +78,9 @@
|
||||
|
||||
**Mathematical Formulation**
|
||||
|
||||
\f[
|
||||
output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
|
||||
\f]
|
||||
\f[
|
||||
output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
|
||||
\f]
|
||||
|
||||
**Example**
|
||||
|
||||
|
||||
@@ -70,9 +70,9 @@
|
||||
|
||||
**Mathematical Formulation**
|
||||
|
||||
\f[
|
||||
output_{j} = MAX\{ x_{0}, ... x_{i}\}
|
||||
\f]
|
||||
\f[
|
||||
output_{j} = MAX\{ x_{0}, ... x_{i}\}
|
||||
\f]
|
||||
|
||||
**Example**
|
||||
|
||||
|
||||
@@ -4,12 +4,6 @@ This topic demonstrates how to use the Benchmark C++ Tool to estimate deep learn
|
||||
|
||||
> **NOTE:** This topic describes usage of C++ implementation of the Benchmark Tool. For the Python* implementation, refer to [Benchmark Python* Tool](../../tools/benchmark_tool/README.md).
|
||||
|
||||
> **TIP**: You also can work with the Benchmark Tool inside the OpenVINO™ [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench).
|
||||
> [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare
|
||||
> performance of deep learning models on various Intel® architecture
|
||||
> configurations. In the DL Workbench, you can use most of OpenVINO™ toolkit components.
|
||||
> <br>
|
||||
> Proceed to an [easy installation from Docker](@ref workbench_docs_Workbench_DG_Install_from_Docker_Hub) to get started.
|
||||
|
||||
## How It Works
|
||||
|
||||
@@ -49,7 +43,6 @@ The application also saves executable graph information serialized to an XML fil
|
||||
|
||||
|
||||
## Run the Tool
|
||||
|
||||
Note that the benchmark_app usually produces optimal performance for any device out of the box.
|
||||
|
||||
**So in most cases you don't need to play the app options explicitly and the plain device name is enough**, for example, for CPU:
|
||||
|
||||
@@ -11,6 +11,7 @@ for %%A in ("%GNA%") do set GNA_FILENAME=%%~nxA
|
||||
for %%A in ("%OPENCV%") do set OPENCV_FILENAME=%%~nxA
|
||||
for %%A in ("%MYRIAD%") do set MYRIAD_FILENAME=%%~nxA
|
||||
for %%A in ("%HDDL%") do set HDDL_FILENAME=%%~nxA
|
||||
for %%A in ("%HDDL_UNITE%") do set HDDL_UNITE_FILENAME=%%~nxA
|
||||
for %%A in ("%VPU_FIRMWARE_MA2450%") do set VPU_FIRMWARE_MA2450_FILENAME=%%~nxA
|
||||
for %%A in ("%VPU_FIRMWARE_MA2X8X%") do set VPU_FIRMWARE_MA2X8X_FILENAME=%%~nxA
|
||||
for %%A in ("%TBB%") do set TBB_FILENAME=%%~nxA
|
||||
@@ -86,6 +87,16 @@ if not "%HDDL%"=="" (
|
||||
)
|
||||
)
|
||||
|
||||
if not "%HDDL_UNITE%"=="" (
|
||||
if not exist "%DL_SDK_TEMP%\test_dependencies\HDDL_UNITE\%HDDL_UNITE_FILENAME%" (
|
||||
mkdir "%DL_SDK_TEMP%\test_dependencies\HDDL_UNITE"
|
||||
powershell -command "iwr -outf '%DL_SDK_TEMP%\test_dependencies\HDDL_UNITE\_%HDDL_UNITE_FILENAME%' %HDDL_UNITE%"
|
||||
mkdir "%DL_SDK_TEMP%\test_dependencies\HDDL_UNITE\%HDDL_UNITE_FILENAME%"
|
||||
call "C:\Program Files\7-Zip\7z.exe" x -y %DL_SDK_TEMP%\test_dependencies\HDDL_UNITE\_%HDDL_UNITE_FILENAME% -o%DL_SDK_TEMP%\test_dependencies\HDDL_UNITE\%HDDL_UNITE_FILENAME%
|
||||
del "%DL_SDK_TEMP%\test_dependencies\HDDL_UNITE\_%HDDL_UNITE_FILENAME%" /F /Q
|
||||
)
|
||||
)
|
||||
|
||||
if not "%VPU_FIRMWARE_MA2450%"=="" (
|
||||
if not exist "%DL_SDK_TEMP%\test_dependencies\VPU\%VPU_FIRMWARE_MA2450_FILENAME%" (
|
||||
mkdir "%DL_SDK_TEMP%\test_dependencies\VPU"
|
||||
@@ -124,6 +135,7 @@ set PATH=%DL_SDK_TEMP%\test_dependencies\OMP\%OMP_FILENAME%%OMP%;%PATH%
|
||||
set PATH=%DL_SDK_TEMP%\test_dependencies\GNA\%GNA_FILENAME%%GNA%;%PATH%
|
||||
set PATH=%DL_SDK_TEMP%\test_dependencies\OPENCV\%OPENCV_FILENAME%%OPENCV%;%PATH%
|
||||
set PATH=%DL_SDK_TEMP%\test_dependencies\TBB\%TBB_FILENAME%%TBB%;%PATH%
|
||||
set PATH=%DL_SDK_TEMP%\test_dependencies\HDDL_UNITE\%HDDL_UNITE_FILENAME%%HDDL_UNITE%;%PATH%
|
||||
|
||||
set PATH=%DL_SDK_TEMP%\test_dependencies\MYRIAD\%MYRIAD_FILENAME%%MYRIAD%;%PATH%
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ add_path() {
|
||||
fi
|
||||
}
|
||||
|
||||
runtimes=(MKL CLDNN MYRIAD GNA DLIA OPENCV VPU_FIRMWARE_USB-MA2450 VPU_FIRMWARE_USB-MA2X8X HDDL OMP TBB AOCL_RTE LIBUSB)
|
||||
runtimes=(MKL CLDNN MYRIAD GNA DLIA OPENCV VPU_FIRMWARE_USB-MA2450 VPU_FIRMWARE_USB-MA2X8X HDDL HDDL_UNITE OMP TBB AOCL_RTE LIBUSB)
|
||||
|
||||
export_library_path() {
|
||||
export LD_LIBRARY_PATH=$DL_SDK_TEMP/test_dependencies/$1:$LD_LIBRARY_PATH
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
namespace ngraph {
|
||||
namespace op {
|
||||
|
||||
typedef struct {
|
||||
struct InterpolateIEAttrs {
|
||||
int height = -1;
|
||||
int width = -1;
|
||||
float zoom_factor = 0;
|
||||
@@ -26,7 +26,7 @@ typedef struct {
|
||||
std::string mode = "";
|
||||
int pad_beg = 0;
|
||||
int pad_end = 0;
|
||||
} InterpolateIEAttrs;
|
||||
};
|
||||
|
||||
class TRANSFORMATIONS_API Interp : public Op {
|
||||
public:
|
||||
@@ -45,11 +45,11 @@ private:
|
||||
InterpolateIEAttrs m_attrs;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
struct ResampleIEAttrs {
|
||||
bool antialias = true;
|
||||
int64_t factor = 0;
|
||||
std::string mode = "";
|
||||
} ResampleIEAttrs;
|
||||
};
|
||||
|
||||
class TRANSFORMATIONS_API ResampleV2 : public Op {
|
||||
public:
|
||||
|
||||
@@ -111,7 +111,8 @@ bool ngraph::pass::ConvertOpSet1ToLegacy::run_on_function(std::shared_ptr<ngraph
|
||||
manager.register_pass<ngraph::pass::ConstantFolding>();
|
||||
|
||||
// Multiply the thrird and fourth input instead of the output of FQ with all const inputs
|
||||
manager.register_pass<ngraph::pass::FakeQuantizeMulFusion>();
|
||||
// EDIT: skip pass in order to fix accuracy
|
||||
// manager.register_pass<ngraph::pass::FakeQuantizeMulFusion>();
|
||||
|
||||
// Convolution/Deconvolution/FullyConnected fusions
|
||||
auto convert_convolutions = manager.register_pass<ngraph::pass::GraphRewrite>();
|
||||
|
||||
@@ -9,6 +9,9 @@ set(gtest_force_shared_crt ON CACHE BOOL "disable static CRT for google test")
|
||||
function(add_gtest_libraries)
|
||||
if (UNIX)
|
||||
ie_add_compiler_flags(-Wno-undef)
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
ie_add_compiler_flags(-Wno-deprecated-copy)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
add_subdirectory(gtest)
|
||||
|
||||
@@ -1 +1 @@
|
||||
72f63b276e710e3c626e0ab3c072fdfd
|
||||
528122362255afd2a12a47a3fd61e816
|
||||
|
||||
@@ -13,14 +13,29 @@ if(NOT TARGET ade)
|
||||
return()
|
||||
endif()
|
||||
|
||||
if(INF_ENGINE_TARGET)
|
||||
ocv_option(OPENCV_GAPI_INF_ENGINE "Build GraphAPI module with Inference Engine support" ON)
|
||||
endif()
|
||||
|
||||
set(the_description "OpenCV G-API Core Module")
|
||||
|
||||
ocv_add_module(gapi opencv_imgproc)
|
||||
ocv_add_module(gapi
|
||||
REQUIRED
|
||||
opencv_imgproc
|
||||
OPTIONAL
|
||||
opencv_video
|
||||
WRAP
|
||||
python
|
||||
)
|
||||
|
||||
if(MSVC)
|
||||
# Disable obsollete warning C4503 popping up on MSVC <<2017
|
||||
# https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-1-c4503?view=vs-2019
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4503)
|
||||
if (OPENCV_GAPI_INF_ENGINE AND NOT INF_ENGINE_RELEASE VERSION_GREATER "2021000000")
|
||||
# Disable IE deprecated code warning C4996 for releases < 2021.1
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4996)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
file(GLOB gapi_ext_hdrs
|
||||
@@ -34,6 +49,7 @@ file(GLOB gapi_ext_hdrs
|
||||
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/ocl/*.hpp"
|
||||
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/own/*.hpp"
|
||||
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/render/*.hpp"
|
||||
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/s11n/*.hpp"
|
||||
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/*.hpp"
|
||||
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp"
|
||||
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp"
|
||||
@@ -41,11 +57,13 @@ file(GLOB gapi_ext_hdrs
|
||||
|
||||
set(gapi_srcs
|
||||
# Front-end part
|
||||
src/api/grunarg.cpp
|
||||
src/api/gorigin.cpp
|
||||
src/api/gmat.cpp
|
||||
src/api/garray.cpp
|
||||
src/api/gopaque.cpp
|
||||
src/api/gscalar.cpp
|
||||
src/api/gframe.cpp
|
||||
src/api/gkernel.cpp
|
||||
src/api/gbackend.cpp
|
||||
src/api/gproto.cpp
|
||||
@@ -55,10 +73,14 @@ set(gapi_srcs
|
||||
src/api/operators.cpp
|
||||
src/api/kernels_core.cpp
|
||||
src/api/kernels_imgproc.cpp
|
||||
src/api/kernels_video.cpp
|
||||
src/api/kernels_nnparsers.cpp
|
||||
src/api/kernels_streaming.cpp
|
||||
src/api/render.cpp
|
||||
src/api/render_ocv.cpp
|
||||
src/api/ginfer.cpp
|
||||
src/api/ft_render.cpp
|
||||
src/api/media.cpp
|
||||
src/api/rmat.cpp
|
||||
|
||||
# Compiler part
|
||||
src/compiler/gmodel.cpp
|
||||
@@ -77,6 +99,7 @@ set(gapi_srcs
|
||||
src/compiler/passes/pattern_matching.cpp
|
||||
src/compiler/passes/perform_substitution.cpp
|
||||
src/compiler/passes/streaming.cpp
|
||||
src/compiler/passes/intrin.cpp
|
||||
|
||||
# Executor
|
||||
src/executor/gexecutor.cpp
|
||||
@@ -87,7 +110,9 @@ set(gapi_srcs
|
||||
src/backends/cpu/gcpubackend.cpp
|
||||
src/backends/cpu/gcpukernel.cpp
|
||||
src/backends/cpu/gcpuimgproc.cpp
|
||||
src/backends/cpu/gcpuvideo.cpp
|
||||
src/backends/cpu/gcpucore.cpp
|
||||
src/backends/cpu/gnnparsers.cpp
|
||||
|
||||
# Fluid Backend (also built-in, FIXME:move away)
|
||||
src/backends/fluid/gfluidbuffer.cpp
|
||||
@@ -105,18 +130,30 @@ set(gapi_srcs
|
||||
# IE Backend. FIXME: should be included by CMake
|
||||
# if and only if IE support is enabled
|
||||
src/backends/ie/giebackend.cpp
|
||||
src/backends/ie/giebackend/giewrapper.cpp
|
||||
|
||||
# Render Backend.
|
||||
src/backends/render/grenderocvbackend.cpp
|
||||
# ONNX backend
|
||||
src/backends/onnx/gonnxbackend.cpp
|
||||
|
||||
# Render backend
|
||||
src/backends/render/grenderocv.cpp
|
||||
src/backends/render/ft_render.cpp
|
||||
|
||||
#PlaidML Backend
|
||||
# PlaidML Backend
|
||||
src/backends/plaidml/gplaidmlcore.cpp
|
||||
src/backends/plaidml/gplaidmlbackend.cpp
|
||||
|
||||
# Compound
|
||||
# Common backend code
|
||||
src/backends/common/gmetabackend.cpp
|
||||
src/backends/common/gcompoundbackend.cpp
|
||||
src/backends/common/gcompoundkernel.cpp
|
||||
|
||||
# Serialization API and routines
|
||||
src/api/s11n.cpp
|
||||
src/backends/common/serialization.cpp
|
||||
|
||||
# Python bridge
|
||||
src/backends/ie/bindings_ie.cpp
|
||||
)
|
||||
|
||||
ocv_add_dispatched_file(backends/fluid/gfluidimgproc_func SSE4_1 AVX2)
|
||||
@@ -132,12 +169,20 @@ ocv_module_include_directories("${CMAKE_CURRENT_LIST_DIR}/src")
|
||||
|
||||
ocv_create_module()
|
||||
|
||||
ocv_target_link_libraries(${the_module} PRIVATE ade ${INF_ENGINE_TARGET})
|
||||
ocv_target_link_libraries(${the_module} PRIVATE ade)
|
||||
if(OPENCV_GAPI_INF_ENGINE)
|
||||
ocv_target_link_libraries(${the_module} PRIVATE ${INF_ENGINE_TARGET})
|
||||
endif()
|
||||
if(HAVE_TBB)
|
||||
ocv_target_link_libraries(${the_module} PRIVATE tbb)
|
||||
endif()
|
||||
|
||||
ocv_add_accuracy_tests(${INF_ENGINE_TARGET})
|
||||
set(__test_extra_deps "")
|
||||
if(OPENCV_GAPI_INF_ENGINE)
|
||||
list(APPEND __test_extra_deps ${INF_ENGINE_TARGET})
|
||||
endif()
|
||||
ocv_add_accuracy_tests(${__test_extra_deps})
|
||||
|
||||
# FIXME: test binary is linked with ADE directly since ADE symbols
|
||||
# are not exported from libopencv_gapi.so in any form - thus
|
||||
# there're two copies of ADE code in memory when tests run (!)
|
||||
@@ -165,5 +210,20 @@ if(HAVE_PLAIDML)
|
||||
ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${PLAIDML_INCLUDE_DIRS})
|
||||
endif()
|
||||
|
||||
|
||||
if(WIN32)
|
||||
# Required for htonl/ntohl on Windows
|
||||
ocv_target_link_libraries(${the_module} PRIVATE wsock32 ws2_32)
|
||||
endif()
|
||||
|
||||
if(HAVE_ONNX)
|
||||
ocv_target_link_libraries(${the_module} PRIVATE ${ONNX_LIBRARY})
|
||||
ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX=1)
|
||||
if(TARGET opencv_test_gapi)
|
||||
ocv_target_compile_definitions(opencv_test_gapi PRIVATE HAVE_ONNX=1)
|
||||
ocv_target_link_libraries(opencv_test_gapi PRIVATE ${ONNX_LIBRARY})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
ocv_add_perf_tests()
|
||||
ocv_add_samples()
|
||||
|
||||
@@ -15,6 +15,8 @@ file(GLOB FLUID_includes "${FLUID_ROOT}/include/opencv2/*.hpp"
|
||||
"${FLUID_ROOT}/include/opencv2/gapi/own/*.hpp"
|
||||
"${FLUID_ROOT}/include/opencv2/gapi/fluid/*.hpp")
|
||||
file(GLOB FLUID_sources "${FLUID_ROOT}/src/api/g*.cpp"
|
||||
"${FLUID_ROOT}/src/api/rmat.cpp"
|
||||
"${FLUID_ROOT}/src/api/media.cpp"
|
||||
"${FLUID_ROOT}/src/compiler/*.cpp"
|
||||
"${FLUID_ROOT}/src/compiler/passes/*.cpp"
|
||||
"${FLUID_ROOT}/src/executor/*.cpp"
|
||||
@@ -45,3 +47,8 @@ if(MSVC)
|
||||
endif()
|
||||
|
||||
target_link_libraries(${FLUID_TARGET} PRIVATE ade)
|
||||
|
||||
if(WIN32)
|
||||
# Required for htonl/ntohl on Windows
|
||||
target_link_libraries(${FLUID_TARGET} PRIVATE wsock32 ws2_32)
|
||||
endif()
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
#+TITLE: OpenCV 4.0 Graph API
|
||||
#+TITLE: OpenCV 4.4 Graph API
|
||||
#+AUTHOR: Dmitry Matveev\newline Intel Corporation
|
||||
#+OPTIONS: H:2 toc:t num:t
|
||||
#+LATEX_CLASS: beamer
|
||||
#+LATEX_CLASS_OPTIONS: [presentation]
|
||||
#+LATEX_HEADER: \usepackage{transparent} \usepackage{listings} \usepackage{pgfplots} \usepackage{mtheme.sty/beamerthememetropolis}
|
||||
#+LATEX_HEADER: \setbeamertemplate{frame footer}{OpenCV 4.0 G-API: Overview and programming by example}
|
||||
#+LATEX_HEADER: \setbeamertemplate{frame footer}{OpenCV 4.4 G-API: Overview and programming by example}
|
||||
#+BEAMER_HEADER: \subtitle{Overview and programming by example}
|
||||
#+BEAMER_HEADER: \titlegraphic{ \vspace*{3cm}\hspace*{5cm} {\transparent{0.2}\includegraphics[height=\textheight]{ocv_logo.eps}}}
|
||||
#+COLUMNS: %45ITEM %10BEAMER_ENV(Env) %10BEAMER_ACT(Act) %4BEAMER_COL(Col) %8BEAMER_OPT(Opt)
|
||||
@@ -21,7 +21,7 @@
|
||||
|
||||
- OpenCV meets C++, ~cv::Mat~ replaces ~IplImage*~;
|
||||
|
||||
*** Version 3.0: -- Welcome Transparent API (T-API)
|
||||
*** Version 3.0 -- Welcome Transparent API (T-API)
|
||||
|
||||
- ~cv::UMat~ is introduced as a /transparent/ addition to
|
||||
~cv::Mat~;
|
||||
@@ -32,7 +32,7 @@
|
||||
** OpenCV evolution in one slide (cont'd)
|
||||
# FIXME: Learn proper page-breaking!
|
||||
|
||||
*** Version 4.0: -- Welcome Graph API (G-API)
|
||||
*** Version 4.0 -- Welcome Graph API (G-API)
|
||||
|
||||
- A new separate module (not a full library rewrite);
|
||||
- A framework (or even a /meta/-framework);
|
||||
@@ -45,6 +45,24 @@
|
||||
- Kernels can be written in unconstrained platform-native code;
|
||||
- Halide can serve as a backend (one of many).
|
||||
|
||||
** OpenCV evolution in one slide (cont'd)
|
||||
# FIXME: Learn proper page-breaking!
|
||||
|
||||
*** Version 4.2 -- New horizons
|
||||
|
||||
- Introduced in-graph inference via OpenVINO™ Toolkit;
|
||||
- Introduced video-oriented Streaming execution mode;
|
||||
- Extended focus from individual image processing to the full
|
||||
application pipeline optimization.
|
||||
|
||||
*** Version 4.4 -- More on video
|
||||
|
||||
- Introduced a notion of stateful kernels;
|
||||
- The road to object tracking, background subtraction, etc. in the
|
||||
graph;
|
||||
- Added more video-oriented operations (feature detection, Optical
|
||||
flow).
|
||||
|
||||
** Why G-API?
|
||||
|
||||
*** Why introduce a new execution model?
|
||||
@@ -80,7 +98,7 @@
|
||||
- *Heterogeneity* gets extra benefits like:
|
||||
- Avoiding unnecessary data transfers;
|
||||
- Shadowing transfer costs with parallel host co-execution;
|
||||
- Increasing system throughput with frame-level pipelining.
|
||||
- Improving system throughput with frame-level pipelining.
|
||||
|
||||
* Programming with G-API
|
||||
|
||||
@@ -96,7 +114,34 @@
|
||||
- What data objects are /inputs/ to the graph?
|
||||
- What are its /outputs/?
|
||||
|
||||
** A code is worth a thousand words
|
||||
** The code is worth a thousand words
|
||||
:PROPERTIES:
|
||||
:BEAMER_opt: shrink=42
|
||||
:END:
|
||||
|
||||
#+BEGIN_SRC C++
|
||||
#include <opencv2/gapi.hpp> // G-API framework header
|
||||
#include <opencv2/gapi/imgproc.hpp> // cv::gapi::blur()
|
||||
#include <opencv2/highgui.hpp> // cv::imread/imwrite
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
if (argc < 3) return 1;
|
||||
|
||||
cv::GMat in; // Express the graph:
|
||||
cv::GMat out = cv::gapi::blur(in, cv::Size(3,3)); // `out` is a result of `blur` of `in`
|
||||
|
||||
cv::Mat in_mat = cv::imread(argv[1]); // Get the real data
|
||||
cv::Mat out_mat; // Output buffer (may be empty)
|
||||
|
||||
cv::GComputation(cv::GIn(in), cv::GOut(out)) // Declare a graph from `in` to `out`
|
||||
.apply(cv::gin(in_mat), cv::gout(out_mat)); // ...and run it immediately
|
||||
|
||||
cv::imwrite(argv[2], out_mat); // Save the result
|
||||
return 0;
|
||||
}
|
||||
#+END_SRC
|
||||
|
||||
** The code is worth a thousand words
|
||||
:PROPERTIES:
|
||||
:BEAMER_opt: shrink=42
|
||||
:END:
|
||||
@@ -161,7 +206,7 @@ int main(int argc, char *argv[]) {
|
||||
}
|
||||
#+END_SRC
|
||||
|
||||
** A code is worth a thousand words (cont'd)
|
||||
** The code is worth a thousand words (cont'd)
|
||||
# FIXME: sections!!!
|
||||
|
||||
*** What we have just learned?
|
||||
@@ -183,59 +228,82 @@ cv::GComputation(cv::GIn(...), cv::GOut(...))
|
||||
** On data objects
|
||||
|
||||
Graph *protocol* defines what arguments a computation was defined on
|
||||
(both inputs and outputs), and what are the *shapes* (or types) of
|
||||
those arguments:
|
||||
(both inputs and outputs), and what are the *shapes* (or types) of
|
||||
those arguments:
|
||||
|
||||
| *Shape* | *Argument* | Size |
|
||||
|-------------+------------------+-----------------------------|
|
||||
| ~GMat~ | ~Mat~ | Static; defined during |
|
||||
| | | graph compilation |
|
||||
|-------------+------------------+-----------------------------|
|
||||
| ~GScalar~ | ~Scalar~ | 4 x ~double~ |
|
||||
|-------------+------------------+-----------------------------|
|
||||
| ~GArray<T>~ | ~std::vector<T>~ | Dynamic; defined in runtime |
|
||||
| *Shape* | *Argument* | Size |
|
||||
|--------------+------------------+-----------------------------|
|
||||
| ~GMat~ | ~Mat~ | Static; defined during |
|
||||
| | | graph compilation |
|
||||
|--------------+------------------+-----------------------------|
|
||||
| ~GScalar~ | ~Scalar~ | 4 x ~double~ |
|
||||
|--------------+------------------+-----------------------------|
|
||||
| ~GArray<T>~ | ~std::vector<T>~ | Dynamic; defined in runtime |
|
||||
|--------------+------------------+-----------------------------|
|
||||
| ~GOpaque<T>~ | ~T~ | Static, ~sizeof(T)~ |
|
||||
|
||||
~GScalar~ may be value-initialized at construction time to allow
|
||||
expressions like ~GMat a = 2*(b + 1)~.
|
||||
|
||||
** Customization example
|
||||
** On operations and kernels
|
||||
:PROPERTIES:
|
||||
:BEAMER_opt: shrink=22
|
||||
:END:
|
||||
|
||||
*** Tuning the execution
|
||||
*** :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.45
|
||||
:END:
|
||||
|
||||
- Graph execution model is defined by kernels which are used;
|
||||
- Kernels can be specified in graph compilation arguments:
|
||||
#+LaTeX: {\footnotesize
|
||||
#+BEGIN_SRC C++
|
||||
#include <opencv2/gapi/fluid/core.hpp>
|
||||
#include <opencv2/gapi/fluid/imgproc.hpp>
|
||||
...
|
||||
auto pkg = gapi::combine(gapi::core::fluid::kernels(),
|
||||
gapi::imgproc::fluid::kernels(),
|
||||
cv::unite_policy::KEEP);
|
||||
sobel.apply(in_mat, out_mat, compile_args(pkg));
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
- OpenCL backend can be used in the same way;
|
||||
#+LaTeX: {\footnotesize
|
||||
- *NOTE*: ~cv::unite_policy~ has been removed in OpenCV 4.1.1.
|
||||
#+LaTeX: }
|
||||
- Graphs are built with *Operations* over virtual *Data*;
|
||||
- *Operations* define interfaces (literally);
|
||||
- *Kernels* are implementations to *Operations* (like in OOP);
|
||||
- An *Operation* is platform-agnostic, a *kernel* is not;
|
||||
- *Kernels* are implemented for *Backends*, the latter provide
|
||||
APIs to write kernels;
|
||||
- Users can /add/ their *own* operations and kernels,
|
||||
and also /redefine/ "standard" kernels their *own* way.
|
||||
|
||||
** Operations and Kernels
|
||||
*** :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.45
|
||||
:END:
|
||||
|
||||
*** Specifying a kernel package
|
||||
#+BEGIN_SRC dot :file "000-ops-kernels.eps" :cmdline "-Kdot -Teps"
|
||||
digraph G {
|
||||
node [shape=box];
|
||||
rankdir=BT;
|
||||
|
||||
- A *kernel* is an implementation of *operation* (= interface);
|
||||
- A *kernel package* hosts kernels that G-API should use;
|
||||
- Kernels are written for different *backends* and using their APIs;
|
||||
- Two kernel packages can be *merged* into a single one;
|
||||
- User can safely supply his *own kernels* to either /replace/ or
|
||||
/augment/ the default package.
|
||||
- Yes, even the standard kernels can be /overwritten/ by user from
|
||||
the outside!
|
||||
- *Heterogeneous* kernel package hosts kernels of different backends.
|
||||
Gr [label="Graph"];
|
||||
Op [label="Operation\nA"];
|
||||
{rank=same
|
||||
Impl1 [label="Kernel\nA:2"];
|
||||
Impl2 [label="Kernel\nA:1"];
|
||||
}
|
||||
|
||||
** Operations and Kernels (cont'd)
|
||||
# FIXME!!!
|
||||
Op -> Gr [dir=back, label="'consists of'"];
|
||||
Impl1 -> Op [];
|
||||
Impl2 -> Op [label="'is implemented by'"];
|
||||
|
||||
node [shape=note,style=dashed];
|
||||
{rank=same
|
||||
Op;
|
||||
CommentOp [label="Abstract:\ndeclared via\nG_API_OP()"];
|
||||
}
|
||||
{rank=same
|
||||
Comment1 [label="Platform:\ndefined with\nOpenCL backend"];
|
||||
Comment2 [label="Platform:\ndefined with\nOpenCV backend"];
|
||||
}
|
||||
|
||||
CommentOp -> Op [constraint=false, style=dashed, arrowhead=none];
|
||||
Comment1 -> Impl1 [style=dashed, arrowhead=none];
|
||||
Comment2 -> Impl2 [style=dashed, arrowhead=none];
|
||||
}
|
||||
#+END_SRC
|
||||
|
||||
** On operations and kernels (cont'd)
|
||||
|
||||
*** Defining an operation
|
||||
|
||||
@@ -245,16 +313,43 @@ Graph *protocol* defines what arguments a computation was defined on
|
||||
- Metadata callback -- describe what is the output value format(s),
|
||||
given the input and arguments.
|
||||
- Use ~OpType::on(...)~ to use a new kernel ~OpType~ to construct graphs.
|
||||
|
||||
#+LaTeX: {\footnotesize
|
||||
#+BEGIN_SRC C++
|
||||
G_TYPED_KERNEL(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
|
||||
G_API_OP(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
|
||||
static GMatDesc outMeta(GMatDesc in) { return in; }
|
||||
};
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
|
||||
** Operations and Kernels (cont'd)
|
||||
# FIXME!!!
|
||||
** On operations and kernels (cont'd)
|
||||
|
||||
*** ~GSqrt~ vs. ~cv::gapi::sqrt()~
|
||||
|
||||
- How a *type* relates to a *functions* from the example?
|
||||
- These functions are just wrappers over ~::on~:
|
||||
#+LaTeX: {\scriptsize
|
||||
#+BEGIN_SRC C++
|
||||
G_API_OP(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
|
||||
static GMatDesc outMeta(GMatDesc in) { return in; }
|
||||
};
|
||||
GMat gapi::sqrt(const GMat& src) { return GSqrt::on(src); }
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
- Why -- Doxygen, default parameters, 1:n mapping:
|
||||
#+LaTeX: {\scriptsize
|
||||
#+BEGIN_SRC C++
|
||||
cv::GMat custom::unsharpMask(const cv::GMat &src,
|
||||
const int sigma,
|
||||
const float strength) {
|
||||
cv::GMat blurred = cv::gapi::medianBlur(src, sigma);
|
||||
cv::GMat laplacian = cv::gapi::Laplacian(blurred, CV_8U);
|
||||
return (src - (laplacian * strength));
|
||||
}
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
|
||||
** On operations and kernels (cont'd)
|
||||
|
||||
*** Implementing an operation
|
||||
|
||||
@@ -297,6 +392,467 @@ G_TYPED_KERNEL(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
|
||||
- Note ~run~ changes signature but still is derived from the operation
|
||||
signature.
|
||||
|
||||
** Operations and Kernels (cont'd)
|
||||
|
||||
*** Specifying which kernels to use
|
||||
|
||||
- Graph execution model is defined by kernels which are available/used;
|
||||
- Kernels can be specified via the graph compilation arguments:
|
||||
#+LaTeX: {\footnotesize
|
||||
#+BEGIN_SRC C++
|
||||
#include <opencv2/gapi/fluid/core.hpp>
|
||||
#include <opencv2/gapi/fluid/imgproc.hpp>
|
||||
...
|
||||
auto pkg = cv::gapi::combine(cv::gapi::core::fluid::kernels(),
|
||||
cv::gapi::imgproc::fluid::kernels());
|
||||
sobel.apply(in_mat, out_mat, cv::compile_args(pkg));
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
- Users can combine kernels of different backends and G-API will partition
|
||||
the execution among those automatically.
|
||||
|
||||
** Heterogeneity in G-API
|
||||
:PROPERTIES:
|
||||
:BEAMER_opt: shrink=35
|
||||
:END:
|
||||
*** Automatic subgraph partitioning in G-API
|
||||
*** :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.18
|
||||
:END:
|
||||
|
||||
#+BEGIN_SRC dot :file "010-hetero-init.eps" :cmdline "-Kdot -Teps"
|
||||
digraph G {
|
||||
rankdir=TB;
|
||||
ranksep=0.3;
|
||||
|
||||
node [shape=box margin=0 height=0.25];
|
||||
A; B; C;
|
||||
|
||||
node [shape=ellipse];
|
||||
GMat0;
|
||||
GMat1;
|
||||
GMat2;
|
||||
GMat3;
|
||||
|
||||
GMat0 -> A -> GMat1 -> B -> GMat2;
|
||||
GMat2 -> C;
|
||||
GMat0 -> C -> GMat3
|
||||
|
||||
subgraph cluster {style=invis; A; GMat1; B; GMat2; C};
|
||||
}
|
||||
#+END_SRC
|
||||
|
||||
The initial graph: operations are not resolved yet.
|
||||
|
||||
*** :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.18
|
||||
:END:
|
||||
|
||||
#+BEGIN_SRC dot :file "011-hetero-homo.eps" :cmdline "-Kdot -Teps"
|
||||
digraph G {
|
||||
rankdir=TB;
|
||||
ranksep=0.3;
|
||||
|
||||
node [shape=box margin=0 height=0.25];
|
||||
A; B; C;
|
||||
|
||||
node [shape=ellipse];
|
||||
GMat0;
|
||||
GMat1;
|
||||
GMat2;
|
||||
GMat3;
|
||||
|
||||
GMat0 -> A -> GMat1 -> B -> GMat2;
|
||||
GMat2 -> C;
|
||||
GMat0 -> C -> GMat3
|
||||
|
||||
subgraph cluster {style=filled;color=azure2; A; GMat1; B; GMat2; C};
|
||||
}
|
||||
#+END_SRC
|
||||
|
||||
All operations are handled by the same backend.
|
||||
|
||||
*** :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.18
|
||||
:END:
|
||||
|
||||
#+BEGIN_SRC dot :file "012-hetero-a.eps" :cmdline "-Kdot -Teps"
|
||||
digraph G {
|
||||
rankdir=TB;
|
||||
ranksep=0.3;
|
||||
|
||||
node [shape=box margin=0 height=0.25];
|
||||
A; B; C;
|
||||
|
||||
node [shape=ellipse];
|
||||
GMat0;
|
||||
GMat1;
|
||||
GMat2;
|
||||
GMat3;
|
||||
|
||||
GMat0 -> A -> GMat1 -> B -> GMat2;
|
||||
GMat2 -> C;
|
||||
GMat0 -> C -> GMat3
|
||||
|
||||
subgraph cluster_1 {style=filled;color=azure2; A; GMat1; B; }
|
||||
subgraph cluster_2 {style=filled;color=ivory2; C};
|
||||
}
|
||||
#+END_SRC
|
||||
|
||||
~A~ & ~B~ are of backend ~1~, ~C~ is of backend ~2~.
|
||||
|
||||
*** :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.18
|
||||
:END:
|
||||
|
||||
#+BEGIN_SRC dot :file "013-hetero-b.eps" :cmdline "-Kdot -Teps"
|
||||
digraph G {
|
||||
rankdir=TB;
|
||||
ranksep=0.3;
|
||||
|
||||
node [shape=box margin=0 height=0.25];
|
||||
A; B; C;
|
||||
|
||||
node [shape=ellipse];
|
||||
GMat0;
|
||||
GMat1;
|
||||
GMat2;
|
||||
GMat3;
|
||||
|
||||
GMat0 -> A -> GMat1 -> B -> GMat2;
|
||||
GMat2 -> C;
|
||||
GMat0 -> C -> GMat3
|
||||
|
||||
subgraph cluster_1 {style=filled;color=azure2; A};
|
||||
subgraph cluster_2 {style=filled;color=ivory2; B};
|
||||
subgraph cluster_3 {style=filled;color=azure2; C};
|
||||
}
|
||||
#+END_SRC
|
||||
|
||||
~A~ & ~C~ are of backend ~1~, ~B~ is of backend ~2~.
|
||||
|
||||
** Heterogeneity in G-API
|
||||
|
||||
*** Heterogeneity summary
|
||||
|
||||
- G-API automatically partitions its graph in subgraphs (called "islands")
|
||||
based on the available kernels;
|
||||
- Adjacent kernels taken from the same backend are "fused" into the same
|
||||
"island";
|
||||
- G-API implements a two-level execution model:
|
||||
- Islands are executed at the top level by a G-API's *Executor*;
|
||||
- Island internals are run at the bottom level by its *Backend*;
|
||||
- G-API fully delegates the low-level execution and memory management to backends.
|
||||
|
||||
* Inference and Streaming
|
||||
|
||||
** Inference with G-API
|
||||
|
||||
*** In-graph inference example
|
||||
|
||||
- Starting with OpencV 4.2 (2019), G-API allows to integrate ~infer~
|
||||
operations into the graph:
|
||||
#+LaTeX: {\scriptsize
|
||||
#+BEGIN_SRC C++
|
||||
G_API_NET(ObjDetect, <cv::GMat(cv::GMat)>, "pdf.example.od");
|
||||
|
||||
cv::GMat in;
|
||||
cv::GMat blob = cv::gapi::infer<ObjDetect>(bgr);
|
||||
cv::GOpaque<cv::Size> size = cv::gapi::streaming::size(bgr);
|
||||
cv::GArray<cv::Rect> objs = cv::gapi::streaming::parseSSD(blob, size);
|
||||
cv::GComputation pipelne(cv::GIn(in), cv::GOut(objs));
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
- Starting with OpenCV 4.5 (2020), G-API will provide more streaming-
|
||||
and NN-oriented operations out of the box.
|
||||
|
||||
** Inference with G-API
|
||||
|
||||
*** What is the difference?
|
||||
|
||||
- ~ObjDetect~ is not an operation, ~cv::gapi::infer<T>~ is;
|
||||
- ~cv::gapi::infer<T>~ is a *generic* operation, where ~T=ObjDetect~ describes
|
||||
the calling convention:
|
||||
- How many inputs the network consumes,
|
||||
- How many outputs the network produces.
|
||||
- Inference data types are ~GMat~ only:
|
||||
- Representing an image, then preprocessed automatically;
|
||||
- Representing a blob (n-dimensional ~Mat~), then passed as-is.
|
||||
- Inference *backends* only need to implement a single generic operation ~infer~.
|
||||
|
||||
** Inference with G-API
|
||||
|
||||
*** But how does it run?
|
||||
|
||||
- Since ~infer~ is an *Operation*, backends may provide *Kernels* implenting it;
|
||||
- The only publicly available inference backend now is *OpenVINO™*:
|
||||
- Brings its ~infer~ kernel atop of the Inference Engine;
|
||||
- NN model data is passed through G-API compile arguments (like kernels);
|
||||
- Every NN backend provides its own structure to configure the network (like
|
||||
a kernel API).
|
||||
|
||||
** Inference with G-API
|
||||
|
||||
*** Passing OpenVINO™ parameters to G-API
|
||||
|
||||
- ~ObjDetect~ example:
|
||||
#+LaTeX: {\footnotesize
|
||||
#+BEGIN_SRC C++
|
||||
auto face_net = cv::gapi::ie::Params<ObjDetect> {
|
||||
face_xml_path, // path to the topology IR
|
||||
face_bin_path, // path to the topology weights
|
||||
face_device_string, // OpenVINO plugin (device) string
|
||||
};
|
||||
auto networks = cv::gapi::networks(face_net);
|
||||
pipeline.compile(.., cv::compile_args(..., networks));
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
- ~AgeGender~ requires binding Op's outputs to NN layers:
|
||||
#+LaTeX: {\footnotesize
|
||||
#+BEGIN_SRC C++
|
||||
auto age_net = cv::gapi::ie::Params<AgeGender> {
|
||||
...
|
||||
}.cfgOutputLayers({"age_conv3", "prob"}); // array<string,2> !
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
|
||||
** Streaming with G-API
|
||||
|
||||
#+BEGIN_SRC dot :file 020-fd-demo.eps :cmdline "-Kdot -Teps"
|
||||
digraph {
|
||||
rankdir=LR;
|
||||
node [shape=box];
|
||||
|
||||
cap [label=Capture];
|
||||
dec [label=Decode];
|
||||
res [label=Resize];
|
||||
cnn [label=Infer];
|
||||
vis [label=Visualize];
|
||||
|
||||
cap -> dec;
|
||||
dec -> res;
|
||||
res -> cnn;
|
||||
cnn -> vis;
|
||||
}
|
||||
#+END_SRC
|
||||
Anatomy of a regular video analytics application
|
||||
|
||||
** Streaming with G-API
|
||||
|
||||
#+BEGIN_SRC dot :file 021-fd-serial.eps :cmdline "-Kdot -Teps"
|
||||
digraph {
|
||||
node [shape=box margin=0 width=0.3 height=0.4]
|
||||
nodesep=0.2;
|
||||
rankdir=LR;
|
||||
|
||||
subgraph cluster0 {
|
||||
colorscheme=blues9
|
||||
pp [label="..." shape=plaintext];
|
||||
v0 [label=V];
|
||||
label="Frame N-1";
|
||||
color=7;
|
||||
}
|
||||
|
||||
subgraph cluster1 {
|
||||
colorscheme=blues9
|
||||
c1 [label=C];
|
||||
d1 [label=D];
|
||||
r1 [label=R];
|
||||
i1 [label=I];
|
||||
v1 [label=V];
|
||||
label="Frame N";
|
||||
color=6;
|
||||
}
|
||||
|
||||
subgraph cluster2 {
|
||||
colorscheme=blues9
|
||||
c2 [label=C];
|
||||
nn [label="..." shape=plaintext];
|
||||
label="Frame N+1";
|
||||
color=5;
|
||||
}
|
||||
|
||||
c1 -> d1 -> r1 -> i1 -> v1;
|
||||
|
||||
pp-> v0;
|
||||
v0 -> c1 [style=invis];
|
||||
v1 -> c2 [style=invis];
|
||||
c2 -> nn;
|
||||
}
|
||||
#+END_SRC
|
||||
Serial execution of the sample video analytics application
|
||||
|
||||
** Streaming with G-API
|
||||
:PROPERTIES:
|
||||
:BEAMER_opt: shrink
|
||||
:END:
|
||||
|
||||
#+BEGIN_SRC dot :file 022-fd-pipelined.eps :cmdline "-Kdot -Teps"
|
||||
digraph {
|
||||
nodesep=0.2;
|
||||
ranksep=0.2;
|
||||
node [margin=0 width=0.4 height=0.2];
|
||||
node [shape=plaintext]
|
||||
Camera [label="Camera:"];
|
||||
GPU [label="GPU:"];
|
||||
FPGA [label="FPGA:"];
|
||||
CPU [label="CPU:"];
|
||||
Time [label="Time:"];
|
||||
t6 [label="T6"];
|
||||
t7 [label="T7"];
|
||||
t8 [label="T8"];
|
||||
t9 [label="T9"];
|
||||
t10 [label="T10"];
|
||||
tnn [label="..."];
|
||||
|
||||
node [shape=box margin=0 width=0.4 height=0.4 colorscheme=blues9]
|
||||
node [color=9] V3;
|
||||
node [color=8] F4; V4;
|
||||
node [color=7] DR5; F5; V5;
|
||||
node [color=6] C6; DR6; F6; V6;
|
||||
node [color=5] C7; DR7; F7; V7;
|
||||
node [color=4] C8; DR8; F8;
|
||||
node [color=3] C9; DR9;
|
||||
node [color=2] C10;
|
||||
|
||||
{rank=same; rankdir=LR; Camera C6 C7 C8 C9 C10}
|
||||
Camera -> C6 -> C7 -> C8 -> C9 -> C10 [style=invis];
|
||||
|
||||
{rank=same; rankdir=LR; GPU DR5 DR6 DR7 DR8 DR9}
|
||||
GPU -> DR5 -> DR6 -> DR7 -> DR8 -> DR9 [style=invis];
|
||||
|
||||
C6 -> DR5 [style=invis];
|
||||
C6 -> DR6 [constraint=false];
|
||||
C7 -> DR7 [constraint=false];
|
||||
C8 -> DR8 [constraint=false];
|
||||
C9 -> DR9 [constraint=false];
|
||||
|
||||
{rank=same; rankdir=LR; FPGA F4 F5 F6 F7 F8}
|
||||
FPGA -> F4 -> F5 -> F6 -> F7 -> F8 [style=invis];
|
||||
|
||||
DR5 -> F4 [style=invis];
|
||||
DR5 -> F5 [constraint=false];
|
||||
DR6 -> F6 [constraint=false];
|
||||
DR7 -> F7 [constraint=false];
|
||||
DR8 -> F8 [constraint=false];
|
||||
|
||||
{rank=same; rankdir=LR; CPU V3 V4 V5 V6 V7}
|
||||
CPU -> V3 -> V4 -> V5 -> V6 -> V7 [style=invis];
|
||||
|
||||
F4 -> V3 [style=invis];
|
||||
F4 -> V4 [constraint=false];
|
||||
F5 -> V5 [constraint=false];
|
||||
F6 -> V6 [constraint=false];
|
||||
F7 -> V7 [constraint=false];
|
||||
|
||||
{rank=same; rankdir=LR; Time t6 t7 t8 t9 t10 tnn}
|
||||
Time -> t6 -> t7 -> t8 -> t9 -> t10 -> tnn [style=invis];
|
||||
|
||||
CPU -> Time [style=invis];
|
||||
V3 -> t6 [style=invis];
|
||||
V4 -> t7 [style=invis];
|
||||
V5 -> t8 [style=invis];
|
||||
V6 -> t9 [style=invis];
|
||||
V7 -> t10 [style=invis];
|
||||
}
|
||||
#+END_SRC
|
||||
Pipelined execution for the video analytics application
|
||||
|
||||
** Streaming with G-API: Example
|
||||
|
||||
**** Serial mode (4.0) :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.45
|
||||
:END:
|
||||
#+LaTeX: {\tiny
|
||||
#+BEGIN_SRC C++
|
||||
pipeline = cv::GComputation(...);
|
||||
|
||||
cv::VideoCapture cap(input);
|
||||
cv::Mat in_frame;
|
||||
std::vector<cv::Rect> out_faces;
|
||||
|
||||
while (cap.read(in_frame)) {
|
||||
pipeline.apply(cv::gin(in_frame),
|
||||
cv::gout(out_faces),
|
||||
cv::compile_args(kernels,
|
||||
networks));
|
||||
// Process results
|
||||
...
|
||||
}
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
|
||||
**** Streaming mode (since 4.2) :B_block:BMCOL:
|
||||
:PROPERTIES:
|
||||
:BEAMER_env: block
|
||||
:BEAMER_col: 0.45
|
||||
:END:
|
||||
#+LaTeX: {\tiny
|
||||
#+BEGIN_SRC C++
|
||||
pipeline = cv::GComputation(...);
|
||||
|
||||
auto in_src = cv::gapi::wip::make_src
|
||||
<cv::gapi::wip::GCaptureSource>(input)
|
||||
auto cc = pipeline.compileStreaming
|
||||
(cv::compile_args(kernels, networks))
|
||||
cc.setSource(cv::gin(in_src));
|
||||
cc.start();
|
||||
|
||||
std::vector<cv::Rect> out_faces;
|
||||
while (cc.pull(cv::gout(out_faces))) {
|
||||
// Process results
|
||||
...
|
||||
}
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
|
||||
**** More information
|
||||
|
||||
#+LaTeX: {\footnotesize
|
||||
https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/
|
||||
#+LaTeX: }
|
||||
|
||||
* Latest features
|
||||
** Latest features
|
||||
*** Python API
|
||||
|
||||
- Initial Python3 binding is available now in ~master~ (future 4.5);
|
||||
- Only basic CV functionality is supported (~core~ & ~imgproc~ namespaces,
|
||||
selecting backends);
|
||||
- Adding more programmability, inference, and streaming is next.
|
||||
|
||||
** Latest features
|
||||
*** Python API
|
||||
|
||||
#+LaTeX: {\footnotesize
|
||||
#+BEGIN_SRC Python
|
||||
import numpy as np
|
||||
import cv2 as cv
|
||||
|
||||
sz = (1280, 720)
|
||||
in1 = np.random.randint(0, 100, sz).astype(np.uint8)
|
||||
in2 = np.random.randint(0, 100, sz).astype(np.uint8)
|
||||
|
||||
g_in1 = cv.GMat()
|
||||
g_in2 = cv.GMat()
|
||||
g_out = cv.gapi.add(g_in1, g_in2)
|
||||
gr = cv.GComputation(g_in1, g_in2, g_out)
|
||||
|
||||
pkg = cv.gapi.core.fluid.kernels()
|
||||
out = gr.apply(in1, in2, args=cv.compile_args(pkg))
|
||||
#+END_SRC
|
||||
#+LaTeX: }
|
||||
|
||||
* Understanding the "G-Effect"
|
||||
|
||||
** Understanding the "G-Effect"
|
||||
@@ -384,15 +940,22 @@ speed-up on QVGA taken as 1.0).
|
||||
* Resources on G-API
|
||||
|
||||
** Resources on G-API
|
||||
|
||||
:PROPERTIES:
|
||||
:BEAMER_opt: shrink
|
||||
:END:
|
||||
*** Repository
|
||||
|
||||
- https://github.com/opencv/opencv (see ~modules/gapi~)
|
||||
- Integral part of OpenCV starting version 4.0;
|
||||
|
||||
*** Article
|
||||
|
||||
- https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/
|
||||
|
||||
*** Documentation
|
||||
|
||||
- https://docs.opencv.org/master/d0/d1e/gapi.html
|
||||
- A tutorial and a class reference are there as well.
|
||||
- https://docs.opencv.org/4.4.0/d0/d1e/gapi.html
|
||||
|
||||
*** Tutorials
|
||||
- https://docs.opencv.org/4.4.0/df/d7e/tutorial_table_of_content_gapi.html
|
||||
|
||||
* Thank you!
|
||||
|
||||
@@ -24,10 +24,17 @@
|
||||
|
||||
#include <opencv2/gapi/gmat.hpp>
|
||||
#include <opencv2/gapi/garray.hpp>
|
||||
#include <opencv2/gapi/gscalar.hpp>
|
||||
#include <opencv2/gapi/gopaque.hpp>
|
||||
#include <opencv2/gapi/gframe.hpp>
|
||||
#include <opencv2/gapi/gcomputation.hpp>
|
||||
#include <opencv2/gapi/gcompiled.hpp>
|
||||
#include <opencv2/gapi/gtyped.hpp>
|
||||
#include <opencv2/gapi/gkernel.hpp>
|
||||
#include <opencv2/gapi/operators.hpp>
|
||||
|
||||
// Include this file here to avoid cyclic dependency between
|
||||
// Desync & GKernel & GComputation & GStreamingCompiled.
|
||||
#include <opencv2/gapi/streaming/desync.hpp>
|
||||
|
||||
#endif // OPENCV_GAPI_HPP
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_CORE_HPP
|
||||
@@ -31,7 +31,7 @@ namespace core {
|
||||
using GMat2 = std::tuple<GMat,GMat>;
|
||||
using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
|
||||
using GMat4 = std::tuple<GMat,GMat,GMat,GMat>;
|
||||
using GMatScalar = std::tuple<GMat, GScalar>;
|
||||
using GMatScalar = std::tuple<GMat, GScalar>;
|
||||
|
||||
G_TYPED_KERNEL(GAdd, <GMat(GMat, GMat, int)>, "org.opencv.core.math.add") {
|
||||
static GMatDesc outMeta(GMatDesc a, GMatDesc b, int ddepth) {
|
||||
@@ -308,6 +308,13 @@ namespace core {
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GCountNonZero, <GOpaque<int>(GMat)>, "org.opencv.core.matrixop.countNonZero") {
|
||||
static GOpaqueDesc outMeta(GMatDesc in) {
|
||||
GAPI_Assert(in.chan == 1);
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GAddW, <GMat(GMat, double, GMat, double, double, int)>, "org.opencv.core.matrixop.addweighted") {
|
||||
static GMatDesc outMeta(GMatDesc a, double, GMatDesc b, double, double, int ddepth) {
|
||||
if (ddepth == -1)
|
||||
@@ -392,10 +399,10 @@ namespace core {
|
||||
}
|
||||
else
|
||||
{
|
||||
GAPI_Assert(fx != 0. && fy != 0.);
|
||||
return in.withSize
|
||||
(Size(static_cast<int>(round(in.size.width * fx)),
|
||||
static_cast<int>(round(in.size.height * fy))));
|
||||
int outSz_w = static_cast<int>(round(in.size.width * fx));
|
||||
int outSz_h = static_cast<int>(round(in.size.height * fy));
|
||||
GAPI_Assert(outSz_w > 0 && outSz_h > 0);
|
||||
return in.withSize(Size(outSz_w, outSz_h));
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -501,7 +508,23 @@ namespace core {
|
||||
return in.withType(in.depth, in.chan).withSize(dsize);
|
||||
}
|
||||
};
|
||||
}
|
||||
} // namespace core
|
||||
|
||||
namespace streaming {
|
||||
|
||||
// Operations for Streaming (declared in this header for convenience)
|
||||
G_TYPED_KERNEL(GSize, <GOpaque<Size>(GMat)>, "org.opencv.streaming.size") {
|
||||
static GOpaqueDesc outMeta(const GMatDesc&) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GSizeR, <GOpaque<Size>(GOpaque<Rect>)>, "org.opencv.streaming.sizeR") {
|
||||
static GOpaqueDesc outMeta(const GOpaqueDesc&) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
} // namespace streaming
|
||||
|
||||
//! @addtogroup gapi_math
|
||||
//! @{
|
||||
@@ -528,7 +551,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
|
||||
@param ddepth optional depth of the output matrix.
|
||||
@sa sub, addWeighted
|
||||
*/
|
||||
GAPI_EXPORTS GMat add(const GMat& src1, const GMat& src2, int ddepth = -1);
|
||||
GAPI_EXPORTS_W GMat add(const GMat& src1, const GMat& src2, int ddepth = -1);
|
||||
|
||||
/** @brief Calculates the per-element sum of matrix and given scalar.
|
||||
|
||||
@@ -743,8 +766,9 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
|
||||
|
||||
@note Function textual ID is "org.opencv.core.math.mean"
|
||||
@param src input matrix.
|
||||
@sa countNonZero, min, max
|
||||
*/
|
||||
GAPI_EXPORTS GScalar mean(const GMat& src);
|
||||
GAPI_EXPORTS_W GScalar mean(const GMat& src);
|
||||
|
||||
/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.
|
||||
|
||||
@@ -844,7 +868,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGT"
|
||||
@param src1 first input matrix.
|
||||
@param src2 second input matrix/scalar of the same depth as first input matrix.
|
||||
@sa min, max, threshold, cmpLE, cmpGE, cmpLS
|
||||
@sa min, max, threshold, cmpLE, cmpGE, cmpLT
|
||||
*/
|
||||
GAPI_EXPORTS GMat cmpGT(const GMat& src1, const GMat& src2);
|
||||
/** @overload
|
||||
@@ -896,7 +920,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1,
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGE"
|
||||
@param src1 first input matrix.
|
||||
@param src2 second input matrix/scalar of the same depth as first input matrix.
|
||||
@sa min, max, threshold, cmpLE, cmpGT, cmpLS
|
||||
@sa min, max, threshold, cmpLE, cmpGT, cmpLT
|
||||
*/
|
||||
GAPI_EXPORTS GMat cmpGE(const GMat& src1, const GMat& src2);
|
||||
/** @overload
|
||||
@@ -922,7 +946,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1,
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLE"
|
||||
@param src1 first input matrix.
|
||||
@param src2 second input matrix/scalar of the same depth as first input matrix.
|
||||
@sa min, max, threshold, cmpGT, cmpGE, cmpLS
|
||||
@sa min, max, threshold, cmpGT, cmpGE, cmpLT
|
||||
*/
|
||||
GAPI_EXPORTS GMat cmpLE(const GMat& src1, const GMat& src2);
|
||||
/** @overload
|
||||
@@ -1000,7 +1024,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
|
||||
*/
|
||||
GAPI_EXPORTS GMat bitwise_and(const GMat& src1, const GMat& src2);
|
||||
/** @overload
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_andS"
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_andS"
|
||||
@param src1 first input matrix.
|
||||
@param src2 scalar, which will be per-lemenetly conjuncted with elements of src1.
|
||||
*/
|
||||
@@ -1024,7 +1048,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
|
||||
*/
|
||||
GAPI_EXPORTS GMat bitwise_or(const GMat& src1, const GMat& src2);
|
||||
/** @overload
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_orS"
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_orS"
|
||||
@param src1 first input matrix.
|
||||
@param src2 scalar, which will be per-lemenetly disjuncted with elements of src1.
|
||||
*/
|
||||
@@ -1049,7 +1073,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
|
||||
*/
|
||||
GAPI_EXPORTS GMat bitwise_xor(const GMat& src1, const GMat& src2);
|
||||
/** @overload
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_xorS"
|
||||
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_xorS"
|
||||
@param src1 first input matrix.
|
||||
@param src2 scalar, for which per-lemenet "logical or" operation on elements of src1 will be performed.
|
||||
*/
|
||||
@@ -1109,7 +1133,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1,
|
||||
@note Function textual ID is "org.opencv.core.matrixop.min"
|
||||
@param src1 first input matrix.
|
||||
@param src2 second input matrix of the same size and depth as src1.
|
||||
@sa max, compareEqual, compareLess, compareLessEqual
|
||||
@sa max, cmpEQ, cmpLT, cmpLE
|
||||
*/
|
||||
GAPI_EXPORTS GMat min(const GMat& src1, const GMat& src2);
|
||||
|
||||
@@ -1126,7 +1150,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
|
||||
@note Function textual ID is "org.opencv.core.matrixop.max"
|
||||
@param src1 first input matrix.
|
||||
@param src2 second input matrix of the same size and depth as src1.
|
||||
@sa min, compare, compareEqual, compareGreater, compareGreaterEqual
|
||||
@sa min, compare, cmpEQ, cmpGT, cmpGE
|
||||
*/
|
||||
GAPI_EXPORTS GMat max(const GMat& src1, const GMat& src2);
|
||||
|
||||
@@ -1172,10 +1196,23 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
|
||||
|
||||
@note Function textual ID is "org.opencv.core.matrixop.sum"
|
||||
@param src input matrix.
|
||||
@sa min, max
|
||||
@sa countNonZero, mean, min, max
|
||||
*/
|
||||
GAPI_EXPORTS GScalar sum(const GMat& src);
|
||||
|
||||
/** @brief Counts non-zero array elements.
|
||||
|
||||
The function returns the number of non-zero elements in src :
|
||||
\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f]
|
||||
|
||||
Supported matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
|
||||
|
||||
@note Function textual ID is "org.opencv.core.matrixop.countNonZero"
|
||||
@param src input single-channel matrix.
|
||||
@sa mean, min, max
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<int> countNonZero(const GMat& src);
|
||||
|
||||
/** @brief Calculates the weighted sum of two matrices.
|
||||
|
||||
The function addWeighted calculates the weighted sum of two matrices as follows:
|
||||
@@ -1312,14 +1349,14 @@ Output matrix must be of the same size and depth as src.
|
||||
types.
|
||||
@param type thresholding type (see the cv::ThresholdTypes).
|
||||
|
||||
@sa min, max, cmpGT, cmpLE, cmpGE, cmpLS
|
||||
@sa min, max, cmpGT, cmpLE, cmpGE, cmpLT
|
||||
*/
|
||||
GAPI_EXPORTS GMat threshold(const GMat& src, const GScalar& thresh, const GScalar& maxval, int type);
|
||||
/** @overload
|
||||
This function applicable for all threshold types except CV_THRESH_OTSU and CV_THRESH_TRIANGLE
|
||||
@note Function textual ID is "org.opencv.core.matrixop.thresholdOT"
|
||||
*/
|
||||
GAPI_EXPORTS std::tuple<GMat, GScalar> threshold(const GMat& src, const GScalar& maxval, int type);
|
||||
GAPI_EXPORTS_W std::tuple<GMat, GScalar> threshold(const GMat& src, const GScalar& maxval, int type);
|
||||
|
||||
/** @brief Applies a range-level threshold to each matrix element.
|
||||
|
||||
@@ -1434,7 +1471,7 @@ All output matrices must be in @ref CV_8UC1.
|
||||
@sa merge3, merge4
|
||||
*/
|
||||
GAPI_EXPORTS std::tuple<GMat, GMat, GMat,GMat> split4(const GMat& src);
|
||||
GAPI_EXPORTS std::tuple<GMat, GMat, GMat> split3(const GMat& src);
|
||||
GAPI_EXPORTS_W std::tuple<GMat, GMat, GMat> split3(const GMat& src);
|
||||
|
||||
/** @brief Applies a generic geometrical transformation to an image.
|
||||
|
||||
@@ -1458,8 +1495,8 @@ Output image must be of the same size and depth as input one.
|
||||
CV_32FC1, or CV_32FC2.
|
||||
@param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
|
||||
if map1 is (x,y) points), respectively.
|
||||
@param interpolation Interpolation method (see cv::InterpolationFlags). The method INTER_AREA is
|
||||
not supported by this function.
|
||||
@param interpolation Interpolation method (see cv::InterpolationFlags). The methods #INTER_AREA
|
||||
and #INTER_LINEAR_EXACT are not supported by this function.
|
||||
@param borderMode Pixel extrapolation method (see cv::BorderTypes). When
|
||||
borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that
|
||||
corresponds to the "outliers" in the source image are not modified by the function.
|
||||
@@ -1720,6 +1757,26 @@ GAPI_EXPORTS GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, i
|
||||
int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar());
|
||||
//! @} gapi_transform
|
||||
|
||||
namespace streaming {
|
||||
/** @brief Gets dimensions from Mat.
|
||||
|
||||
@note Function textual ID is "org.opencv.streaming.size"
|
||||
|
||||
@param src Input tensor
|
||||
@return Size (tensor dimensions).
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Size> size(const GMat& src);
|
||||
|
||||
/** @overload
|
||||
Gets dimensions from rectangle.
|
||||
|
||||
@note Function textual ID is "org.opencv.streaming.sizeR"
|
||||
|
||||
@param r Input rectangle.
|
||||
@return Size (rectangle dimensions).
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Size> size(const GOpaque<Rect>& r);
|
||||
} //namespace streaming
|
||||
} //namespace gapi
|
||||
} //namespace cv
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ namespace gapi {
|
||||
namespace core {
|
||||
namespace cpu {
|
||||
|
||||
GAPI_EXPORTS GKernelPackage kernels();
|
||||
GAPI_EXPORTS_W cv::gapi::GKernelPackage kernels();
|
||||
|
||||
} // namespace cpu
|
||||
} // namespace core
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018-2019 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GCPUKERNEL_HPP
|
||||
@@ -17,7 +17,7 @@
|
||||
#include <opencv2/gapi/gcommon.hpp>
|
||||
#include <opencv2/gapi/gkernel.hpp>
|
||||
#include <opencv2/gapi/garg.hpp>
|
||||
#include <opencv2/gapi/own/convert.hpp> //to_ocv
|
||||
#include <opencv2/gapi/gmetaarg.hpp>
|
||||
#include <opencv2/gapi/util/compiler_hints.hpp> //suppress_unused_warning
|
||||
#include <opencv2/gapi/util/util.hpp>
|
||||
|
||||
@@ -96,8 +96,8 @@ public:
|
||||
const T& inArg(int input) { return m_args.at(input).get<T>(); }
|
||||
|
||||
// Syntax sugar
|
||||
const cv::gapi::own::Mat& inMat(int input);
|
||||
cv::gapi::own::Mat& outMatR(int output); // FIXME: Avoid cv::gapi::own::Mat m = ctx.outMatR()
|
||||
const cv::Mat& inMat(int input);
|
||||
cv::Mat& outMatR(int output); // FIXME: Avoid cv::Mat m = ctx.outMatR()
|
||||
|
||||
const cv::Scalar& inVal(int input);
|
||||
cv::Scalar& outValR(int output); // FIXME: Avoid cv::Scalar s = ctx.outValR()
|
||||
@@ -110,11 +110,17 @@ public:
|
||||
return outOpaqueRef(output).wref<T>();
|
||||
}
|
||||
|
||||
GArg state()
|
||||
{
|
||||
return m_state;
|
||||
}
|
||||
|
||||
protected:
|
||||
detail::VectorRef& outVecRef(int output);
|
||||
detail::OpaqueRef& outOpaqueRef(int output);
|
||||
|
||||
std::vector<GArg> m_args;
|
||||
GArg m_state;
|
||||
|
||||
//FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call
|
||||
//to OCV kernel. (This can be achieved by a two single time conversions in GCPUExecutable::run,
|
||||
@@ -128,16 +134,19 @@ protected:
|
||||
class GAPI_EXPORTS GCPUKernel
|
||||
{
|
||||
public:
|
||||
// This function is kernel's execution entry point (does the processing work)
|
||||
using F = std::function<void(GCPUContext &)>;
|
||||
// This function is a kernel's execution entry point (does the processing work)
|
||||
using RunF = std::function<void(GCPUContext &)>;
|
||||
// This function is a stateful kernel's setup routine (configures state)
|
||||
using SetupF = std::function<void(const GMetaArgs &, const GArgs &,
|
||||
GArg &, const GCompileArgs &)>;
|
||||
|
||||
GCPUKernel();
|
||||
explicit GCPUKernel(const F& f);
|
||||
GCPUKernel(const RunF& runF, const SetupF& setupF = nullptr);
|
||||
|
||||
void apply(GCPUContext &ctx);
|
||||
RunF m_runF = nullptr;
|
||||
SetupF m_setupF = nullptr;
|
||||
|
||||
protected:
|
||||
F m_f;
|
||||
bool m_isStateful = false;
|
||||
};
|
||||
|
||||
// FIXME: This is an ugly ad-hoc implementation. TODO: refactor
|
||||
@@ -147,12 +156,16 @@ namespace detail
|
||||
template<class T> struct get_in;
|
||||
template<> struct get_in<cv::GMat>
|
||||
{
|
||||
static cv::Mat get(GCPUContext &ctx, int idx) { return to_ocv(ctx.inMat(idx)); }
|
||||
static cv::Mat get(GCPUContext &ctx, int idx) { return ctx.inMat(idx); }
|
||||
};
|
||||
template<> struct get_in<cv::GMatP>
|
||||
{
|
||||
static cv::Mat get(GCPUContext &ctx, int idx) { return get_in<cv::GMat>::get(ctx, idx); }
|
||||
};
|
||||
template<> struct get_in<cv::GFrame>
|
||||
{
|
||||
static cv::MediaFrame get(GCPUContext &ctx, int idx) { return ctx.inArg<cv::MediaFrame>(idx); }
|
||||
};
|
||||
template<> struct get_in<cv::GScalar>
|
||||
{
|
||||
static cv::Scalar get(GCPUContext &ctx, int idx) { return ctx.inVal(idx); }
|
||||
@@ -192,7 +205,7 @@ template<class T> struct get_in
|
||||
};
|
||||
|
||||
struct tracked_cv_mat{
|
||||
tracked_cv_mat(cv::gapi::own::Mat& m) : r{to_ocv(m)}, original_data{m.data} {}
|
||||
tracked_cv_mat(cv::Mat& m) : r{m}, original_data{m.data} {}
|
||||
cv::Mat r;
|
||||
uchar* original_data;
|
||||
|
||||
@@ -252,6 +265,17 @@ template<typename U> struct get_out<cv::GArray<U>>
|
||||
return ctx.outVecR<U>(idx);
|
||||
}
|
||||
};
|
||||
|
||||
//FIXME(dm): GArray<Mat>/GArray<GMat> conversion should be done more gracefully in the system
|
||||
template<> struct get_out<cv::GArray<cv::GMat> >: public get_out<cv::GArray<cv::Mat> >
|
||||
{
|
||||
};
|
||||
|
||||
// FIXME(dm): GArray<vector<U>>/GArray<GArray<U>> conversion should be done more gracefully in the system
|
||||
template<typename U> struct get_out<cv::GArray<cv::GArray<U>> >: public get_out<cv::GArray<std::vector<U>> >
|
||||
{
|
||||
};
|
||||
|
||||
template<typename U> struct get_out<cv::GOpaque<U>>
|
||||
{
|
||||
static U& get(GCPUContext &ctx, int idx)
|
||||
@@ -260,12 +284,73 @@ template<typename U> struct get_out<cv::GOpaque<U>>
|
||||
}
|
||||
};
|
||||
|
||||
template<typename, typename>
|
||||
struct OCVSetupHelper;
|
||||
|
||||
template<typename Impl, typename... Ins>
|
||||
struct OCVSetupHelper<Impl, std::tuple<Ins...>>
|
||||
{
|
||||
// Using 'auto' return type and 'decltype' specifier in both 'setup_impl' versions
|
||||
// to check existence of required 'Impl::setup' functions.
|
||||
// While 'decltype' specifier accepts expression we pass expression with 'comma-operator'
|
||||
// where first operand of comma-operator is call attempt to desired 'Impl::setup' and
|
||||
// the second operand is 'void()' expression.
|
||||
//
|
||||
// SFINAE for 'Impl::setup' which accepts compile arguments.
|
||||
template<int... IIs>
|
||||
static auto setup_impl(const GMetaArgs &metaArgs, const GArgs &args,
|
||||
GArg &state, const GCompileArgs &compileArgs,
|
||||
detail::Seq<IIs...>) ->
|
||||
decltype(Impl::setup(detail::get_in_meta<Ins>(metaArgs, args, IIs)...,
|
||||
std::declval<typename std::add_lvalue_reference<
|
||||
std::shared_ptr<typename Impl::State>
|
||||
>::type
|
||||
>(),
|
||||
compileArgs)
|
||||
, void())
|
||||
{
|
||||
// TODO: unique_ptr <-> shared_ptr conversion ?
|
||||
// To check: Conversion is possible only if the state which should be passed to
|
||||
// 'setup' user callback isn't required to have previous value
|
||||
std::shared_ptr<typename Impl::State> stPtr;
|
||||
Impl::setup(detail::get_in_meta<Ins>(metaArgs, args, IIs)..., stPtr, compileArgs);
|
||||
state = GArg(stPtr);
|
||||
}
|
||||
|
||||
// SFINAE for 'Impl::setup' which doesn't accept compile arguments.
|
||||
template<int... IIs>
|
||||
static auto setup_impl(const GMetaArgs &metaArgs, const GArgs &args,
|
||||
GArg &state, const GCompileArgs &/* compileArgs */,
|
||||
detail::Seq<IIs...>) ->
|
||||
decltype(Impl::setup(detail::get_in_meta<Ins>(metaArgs, args, IIs)...,
|
||||
std::declval<typename std::add_lvalue_reference<
|
||||
std::shared_ptr<typename Impl::State>
|
||||
>::type
|
||||
>()
|
||||
)
|
||||
, void())
|
||||
{
|
||||
// The same comment as in 'setup' above.
|
||||
std::shared_ptr<typename Impl::State> stPtr;
|
||||
Impl::setup(detail::get_in_meta<Ins>(metaArgs, args, IIs)..., stPtr);
|
||||
state = GArg(stPtr);
|
||||
}
|
||||
|
||||
static void setup(const GMetaArgs &metaArgs, const GArgs &args,
|
||||
GArg& state, const GCompileArgs &compileArgs)
|
||||
{
|
||||
setup_impl(metaArgs, args, state, compileArgs,
|
||||
typename detail::MkSeq<sizeof...(Ins)>::type());
|
||||
}
|
||||
};
|
||||
|
||||
// OCVCallHelper is a helper class to call stateless OCV kernels and OCV kernel functors.
|
||||
template<typename, typename, typename>
|
||||
struct OCVCallHelper;
|
||||
|
||||
// FIXME: probably can be simplified with std::apply or analogue.
|
||||
template<typename Impl, typename... Ins, typename... Outs>
|
||||
struct OCVCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...> >
|
||||
struct OCVCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...>>
|
||||
{
|
||||
template<typename... Inputs>
|
||||
struct call_and_postprocess
|
||||
@@ -293,19 +378,16 @@ struct OCVCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...> >
|
||||
//by comparing it's state (data ptr) before and after the call.
|
||||
//This is done by converting each output Mat into tracked_cv_mat object, and binding
|
||||
//them to parameters of ad-hoc function
|
||||
//Convert own::Scalar to cv::Scalar before call kernel and run kernel
|
||||
//convert cv::Scalar to own::Scalar after call kernel and write back results
|
||||
call_and_postprocess<decltype(get_in<Ins>::get(ctx, IIs))...>
|
||||
::call(get_in<Ins>::get(ctx, IIs)...,
|
||||
get_out<Outs>::get(ctx, OIs)...);
|
||||
::call(get_in<Ins>::get(ctx, IIs)..., get_out<Outs>::get(ctx, OIs)...);
|
||||
}
|
||||
|
||||
template<int... IIs, int... OIs>
|
||||
static void call_impl(cv::GCPUContext &ctx, Impl& impl, detail::Seq<IIs...>, detail::Seq<OIs...>)
|
||||
static void call_impl(cv::GCPUContext &ctx, Impl& impl,
|
||||
detail::Seq<IIs...>, detail::Seq<OIs...>)
|
||||
{
|
||||
call_and_postprocess<decltype(cv::detail::get_in<Ins>::get(ctx, IIs))...>
|
||||
::call(impl, cv::detail::get_in<Ins>::get(ctx, IIs)...,
|
||||
cv::detail::get_out<Outs>::get(ctx, OIs)...);
|
||||
call_and_postprocess<decltype(get_in<Ins>::get(ctx, IIs))...>
|
||||
::call(impl, get_in<Ins>::get(ctx, IIs)..., get_out<Outs>::get(ctx, OIs)...);
|
||||
}
|
||||
|
||||
static void call(GCPUContext &ctx)
|
||||
@@ -326,30 +408,86 @@ struct OCVCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...> >
|
||||
}
|
||||
};
|
||||
|
||||
// OCVStCallHelper is a helper class to call stateful OCV kernels.
|
||||
template<typename, typename, typename>
|
||||
struct OCVStCallHelper;
|
||||
|
||||
template<typename Impl, typename... Ins, typename... Outs>
|
||||
struct OCVStCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...>> :
|
||||
OCVCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...>>
|
||||
{
|
||||
template<typename... Inputs>
|
||||
struct call_and_postprocess
|
||||
{
|
||||
template<typename... Outputs>
|
||||
static void call(typename Impl::State& st, Inputs&&... ins, Outputs&&... outs)
|
||||
{
|
||||
Impl::run(std::forward<Inputs>(ins)..., outs..., st);
|
||||
postprocess(outs...);
|
||||
}
|
||||
};
|
||||
|
||||
template<int... IIs, int... OIs>
|
||||
static void call_impl(GCPUContext &ctx, detail::Seq<IIs...>, detail::Seq<OIs...>)
|
||||
{
|
||||
auto& st = *ctx.state().get<std::shared_ptr<typename Impl::State>>();
|
||||
call_and_postprocess<decltype(get_in<Ins>::get(ctx, IIs))...>
|
||||
::call(st, get_in<Ins>::get(ctx, IIs)..., get_out<Outs>::get(ctx, OIs)...);
|
||||
}
|
||||
|
||||
static void call(GCPUContext &ctx)
|
||||
{
|
||||
call_impl(ctx,
|
||||
typename detail::MkSeq<sizeof...(Ins)>::type(),
|
||||
typename detail::MkSeq<sizeof...(Outs)>::type());
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
template<class Impl, class K>
|
||||
class GCPUKernelImpl: public cv::detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>,
|
||||
public cv::detail::KernelTag
|
||||
class GCPUKernelImpl: public cv::detail::KernelTag
|
||||
{
|
||||
using P = detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;
|
||||
using CallHelper = cv::detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;
|
||||
|
||||
public:
|
||||
using API = K;
|
||||
|
||||
static cv::gapi::GBackend backend() { return cv::gapi::cpu::backend(); }
|
||||
static cv::GCPUKernel kernel() { return GCPUKernel(&P::call); }
|
||||
static cv::gapi::GBackend backend() { return cv::gapi::cpu::backend(); }
|
||||
static cv::GCPUKernel kernel() { return GCPUKernel(&CallHelper::call); }
|
||||
};
|
||||
|
||||
template<class Impl, class K, class S>
|
||||
class GCPUStKernelImpl: public cv::detail::KernelTag
|
||||
{
|
||||
using StSetupHelper = detail::OCVSetupHelper<Impl, typename K::InArgs>;
|
||||
using StCallHelper = detail::OCVStCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;
|
||||
|
||||
public:
|
||||
using API = K;
|
||||
using State = S;
|
||||
|
||||
static cv::gapi::GBackend backend() { return cv::gapi::cpu::backend(); }
|
||||
static cv::GCPUKernel kernel() { return GCPUKernel(&StCallHelper::call,
|
||||
&StSetupHelper::setup); }
|
||||
};
|
||||
|
||||
#define GAPI_OCV_KERNEL(Name, API) struct Name: public cv::GCPUKernelImpl<Name, API>
|
||||
|
||||
// TODO: Reuse Anatoliy's logic for support of types with commas in macro.
|
||||
// Retrieve the common part from Anatoliy's logic to the separate place.
|
||||
#define GAPI_OCV_KERNEL_ST(Name, API, State) \
|
||||
struct Name: public cv::GCPUStKernelImpl<Name, API, State> \
|
||||
|
||||
|
||||
class gapi::cpu::GOCVFunctor : public gapi::GFunctor
|
||||
{
|
||||
public:
|
||||
using Impl = std::function<void(GCPUContext &)>;
|
||||
using Meta = cv::GKernel::M;
|
||||
|
||||
GOCVFunctor(const char* id, const Impl& impl)
|
||||
: gapi::GFunctor(id), impl_{GCPUKernel(impl)}
|
||||
GOCVFunctor(const char* id, const Meta &meta, const Impl& impl)
|
||||
: gapi::GFunctor(id), impl_{GCPUKernel(impl), meta}
|
||||
{
|
||||
}
|
||||
|
||||
@@ -364,15 +502,21 @@ private:
|
||||
template<typename K, typename Callable>
|
||||
gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c)
|
||||
{
|
||||
using P = detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
|
||||
return GOCVFunctor(K::id(), std::bind(&P::callFunctor, std::placeholders::_1, std::ref(c)));
|
||||
using P = cv::detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
|
||||
return GOCVFunctor{ K::id()
|
||||
, &K::getOutMeta
|
||||
, std::bind(&P::callFunctor, std::placeholders::_1, std::ref(c))
|
||||
};
|
||||
}
|
||||
|
||||
template<typename K, typename Callable>
|
||||
gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(const Callable& c)
|
||||
{
|
||||
using P = detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
|
||||
return GOCVFunctor(K::id(), std::bind(&P::callFunctor, std::placeholders::_1, c));
|
||||
using P = cv::detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
|
||||
return GOCVFunctor{ K::id()
|
||||
, &K::getOutMeta
|
||||
, std::bind(&P::callFunctor, std::placeholders::_1, c)
|
||||
};
|
||||
}
|
||||
//! @endcond
|
||||
|
||||
|
||||
25
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/cpu/video.hpp
vendored
Normal file
25
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/cpu/video.hpp
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2020 Intel Corporation
|
||||
|
||||
#ifndef OPENCV_GAPI_CPU_VIDEO_API_HPP
|
||||
#define OPENCV_GAPI_CPU_VIDEO_API_HPP
|
||||
|
||||
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
|
||||
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
namespace video {
|
||||
namespace cpu {
|
||||
|
||||
GAPI_EXPORTS GKernelPackage kernels();
|
||||
|
||||
} // namespace cpu
|
||||
} // namespace video
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
|
||||
#endif // OPENCV_GAPI_CPU_VIDEO_API_HPP
|
||||
@@ -13,7 +13,7 @@
|
||||
|
||||
namespace cv { namespace gapi { namespace core { namespace fluid {
|
||||
|
||||
GAPI_EXPORTS GKernelPackage kernels();
|
||||
GAPI_EXPORTS_W cv::gapi::GKernelPackage kernels();
|
||||
|
||||
}}}}
|
||||
|
||||
|
||||
@@ -14,11 +14,9 @@
|
||||
#include <cstdint> // uint8_t
|
||||
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
#include <opencv2/gapi/own/mat.hpp>
|
||||
#include <opencv2/gapi/gmat.hpp>
|
||||
|
||||
#include <opencv2/gapi/util/optional.hpp>
|
||||
#include <opencv2/gapi/own/mat.hpp>
|
||||
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
@@ -58,8 +56,6 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
View() = default;
|
||||
|
||||
const inline uint8_t* InLineB(int index) const // -(w-1)/2...0...+(w-1)/2 for Filters
|
||||
{
|
||||
return m_cache->linePtr(index);
|
||||
@@ -82,11 +78,15 @@ public:
|
||||
Priv& priv(); // internal use only
|
||||
const Priv& priv() const; // internal use only
|
||||
|
||||
View(Priv* p);
|
||||
View();
|
||||
View(std::unique_ptr<Priv>&& p);
|
||||
View(View&& v);
|
||||
View& operator=(View&& v);
|
||||
~View();
|
||||
|
||||
private:
|
||||
std::shared_ptr<Priv> m_priv;
|
||||
const Cache* m_cache;
|
||||
std::unique_ptr<Priv> m_priv;
|
||||
const Cache* m_cache = nullptr;
|
||||
};
|
||||
|
||||
class GAPI_EXPORTS Buffer
|
||||
@@ -111,7 +111,9 @@ public:
|
||||
int wlpi,
|
||||
BorderOpt border);
|
||||
// Constructor for in/out buffers (for tests)
|
||||
Buffer(const cv::gapi::own::Mat &data, bool is_input);
|
||||
Buffer(const cv::Mat &data, bool is_input);
|
||||
~Buffer();
|
||||
Buffer& operator=(Buffer&&);
|
||||
|
||||
inline uint8_t* OutLineB(int index = 0)
|
||||
{
|
||||
@@ -134,13 +136,14 @@ public:
|
||||
inline const GMatDesc& meta() const { return m_cache->m_desc; }
|
||||
|
||||
View mkView(int borderSize, bool ownStorage);
|
||||
void addView(const View* v);
|
||||
|
||||
class GAPI_EXPORTS Priv; // internal use only
|
||||
Priv& priv(); // internal use only
|
||||
const Priv& priv() const; // internal use only
|
||||
|
||||
private:
|
||||
std::shared_ptr<Priv> m_priv;
|
||||
std::unique_ptr<Priv> m_priv;
|
||||
const Cache* m_cache;
|
||||
};
|
||||
|
||||
|
||||
@@ -171,7 +171,7 @@ template<> struct fluid_get_in<cv::GMat>
|
||||
{
|
||||
static const cv::gapi::fluid::View& get(const cv::GArgs &in_args, int idx)
|
||||
{
|
||||
return in_args[idx].unsafe_get<cv::gapi::fluid::View>();
|
||||
return *in_args[idx].unsafe_get<cv::gapi::fluid::View*>();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -2,18 +2,21 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GARG_HPP
|
||||
#define OPENCV_GAPI_GARG_HPP
|
||||
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
#include <type_traits>
|
||||
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
#include <opencv2/gapi/own/mat.hpp>
|
||||
#include <opencv2/gapi/media.hpp>
|
||||
|
||||
#include <opencv2/gapi/util/util.hpp>
|
||||
#include <opencv2/gapi/util/any.hpp>
|
||||
#include <opencv2/gapi/util/variant.hpp>
|
||||
|
||||
@@ -21,9 +24,11 @@
|
||||
#include <opencv2/gapi/gscalar.hpp>
|
||||
#include <opencv2/gapi/garray.hpp>
|
||||
#include <opencv2/gapi/gopaque.hpp>
|
||||
#include <opencv2/gapi/gframe.hpp>
|
||||
#include <opencv2/gapi/gtype_traits.hpp>
|
||||
#include <opencv2/gapi/gmetaarg.hpp>
|
||||
#include <opencv2/gapi/streaming/source.hpp>
|
||||
#include <opencv2/gapi/rmat.hpp>
|
||||
|
||||
namespace cv {
|
||||
|
||||
@@ -46,6 +51,7 @@ public:
|
||||
template<typename T, typename std::enable_if<!detail::is_garg<T>::value, int>::type = 0>
|
||||
explicit GArg(const T &t)
|
||||
: kind(detail::GTypeTraits<T>::kind)
|
||||
, opaque_kind(detail::GOpaqueTraits<T>::kind)
|
||||
, value(detail::wrap_gapi_helper<T>::wrap(t))
|
||||
{
|
||||
}
|
||||
@@ -53,6 +59,7 @@ public:
|
||||
template<typename T, typename std::enable_if<!detail::is_garg<T>::value, int>::type = 0>
|
||||
explicit GArg(T &&t)
|
||||
: kind(detail::GTypeTraits<typename std::decay<T>::type>::kind)
|
||||
, opaque_kind(detail::GOpaqueTraits<typename std::decay<T>::type>::kind)
|
||||
, value(detail::wrap_gapi_helper<T>::wrap(t))
|
||||
{
|
||||
}
|
||||
@@ -78,6 +85,7 @@ public:
|
||||
}
|
||||
|
||||
detail::ArgKind kind = detail::ArgKind::OPAQUE_VAL;
|
||||
detail::OpaqueKind opaque_kind = detail::OpaqueKind::CV_UNKNOWN;
|
||||
|
||||
protected:
|
||||
util::any value;
|
||||
@@ -87,29 +95,104 @@ using GArgs = std::vector<GArg>;
|
||||
|
||||
// FIXME: Express as M<GProtoArg...>::type
|
||||
// FIXME: Move to a separate file!
|
||||
using GRunArg = util::variant<
|
||||
using GRunArgBase = util::variant<
|
||||
#if !defined(GAPI_STANDALONE)
|
||||
cv::Mat,
|
||||
cv::UMat,
|
||||
#endif // !defined(GAPI_STANDALONE)
|
||||
cv::RMat,
|
||||
cv::gapi::wip::IStreamSource::Ptr,
|
||||
cv::gapi::own::Mat,
|
||||
cv::Mat,
|
||||
cv::Scalar,
|
||||
cv::detail::VectorRef,
|
||||
cv::detail::OpaqueRef
|
||||
cv::detail::OpaqueRef,
|
||||
cv::MediaFrame
|
||||
>;
|
||||
|
||||
namespace detail {
|
||||
template<typename,typename>
|
||||
struct in_variant;
|
||||
|
||||
template<typename T, typename... Types>
|
||||
struct in_variant<T, util::variant<Types...> >
|
||||
: std::integral_constant<bool, cv::detail::contains<T, Types...>::value > {
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
struct GAPI_EXPORTS GRunArg: public GRunArgBase
|
||||
{
|
||||
// Metadata information here
|
||||
using Meta = std::unordered_map<std::string, util::any>;
|
||||
Meta meta;
|
||||
|
||||
// Mimic the old GRunArg semantics here, old of the times when
|
||||
// GRunArg was an alias to variant<>
|
||||
GRunArg();
|
||||
GRunArg(const cv::GRunArg &arg);
|
||||
GRunArg(cv::GRunArg &&arg);
|
||||
|
||||
GRunArg& operator= (const GRunArg &arg);
|
||||
GRunArg& operator= (GRunArg &&arg);
|
||||
|
||||
template <typename T>
|
||||
GRunArg(const T &t,
|
||||
const Meta &m = Meta{},
|
||||
typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, int>::type = 0)
|
||||
: GRunArgBase(t)
|
||||
, meta(m)
|
||||
{
|
||||
}
|
||||
template <typename T>
|
||||
GRunArg(T &&t,
|
||||
const Meta &m = Meta{},
|
||||
typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, int>::type = 0)
|
||||
: GRunArgBase(std::move(t))
|
||||
, meta(m)
|
||||
{
|
||||
}
|
||||
template <typename T> auto operator= (const T &t)
|
||||
-> typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, cv::GRunArg>::type&
|
||||
{
|
||||
GRunArgBase::operator=(t);
|
||||
return *this;
|
||||
}
|
||||
template <typename T> auto operator= (T&& t)
|
||||
-> typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, cv::GRunArg>::type&
|
||||
{
|
||||
GRunArgBase::operator=(std::move(t));
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
using GRunArgs = std::vector<GRunArg>;
|
||||
|
||||
// TODO: Think about the addition operator
|
||||
/**
|
||||
* @brief This operator allows to complement the input vector at runtime.
|
||||
*
|
||||
* It's an ordinary overload of addition assignment operator.
|
||||
*
|
||||
* Example of usage:
|
||||
* @snippet dynamic_graph.cpp GRunArgs usage
|
||||
*
|
||||
*/
|
||||
inline GRunArgs& operator += (GRunArgs &lhs, const GRunArgs &rhs)
|
||||
{
|
||||
lhs.reserve(lhs.size() + rhs.size());
|
||||
lhs.insert(lhs.end(), rhs.begin(), rhs.end());
|
||||
return lhs;
|
||||
}
|
||||
|
||||
namespace gapi
|
||||
{
|
||||
namespace wip
|
||||
{
|
||||
/**
|
||||
* @brief This aggregate type represents all types which G-API can handle (via variant).
|
||||
* @brief This aggregate type represents all types which G-API can
|
||||
* handle (via variant).
|
||||
*
|
||||
* It only exists to overcome C++ language limitations (where a `using`-defined class can't be forward-declared).
|
||||
* It only exists to overcome C++ language limitations (where a
|
||||
* `using`-defined class can't be forward-declared).
|
||||
*/
|
||||
struct Data: public GRunArg
|
||||
struct GAPI_EXPORTS Data: public GRunArg
|
||||
{
|
||||
using GRunArg::GRunArg;
|
||||
template <typename T>
|
||||
@@ -122,16 +205,39 @@ struct Data: public GRunArg
|
||||
|
||||
using GRunArgP = util::variant<
|
||||
#if !defined(GAPI_STANDALONE)
|
||||
cv::Mat*,
|
||||
cv::UMat*,
|
||||
#endif // !defined(GAPI_STANDALONE)
|
||||
cv::gapi::own::Mat*,
|
||||
cv::Mat*,
|
||||
cv::RMat*,
|
||||
cv::Scalar*,
|
||||
cv::detail::VectorRef,
|
||||
cv::detail::OpaqueRef
|
||||
>;
|
||||
using GRunArgsP = std::vector<GRunArgP>;
|
||||
|
||||
// TODO: Think about the addition operator
|
||||
/**
|
||||
* @brief This operator allows to complement the output vector at runtime.
|
||||
*
|
||||
* It's an ordinary overload of addition assignment operator.
|
||||
*
|
||||
* Example of usage:
|
||||
* @snippet dynamic_graph.cpp GRunArgsP usage
|
||||
*
|
||||
*/
|
||||
inline GRunArgsP& operator += (GRunArgsP &lhs, const GRunArgsP &rhs)
|
||||
{
|
||||
lhs.reserve(lhs.size() + rhs.size());
|
||||
lhs.insert(lhs.end(), rhs.begin(), rhs.end());
|
||||
return lhs;
|
||||
}
|
||||
|
||||
namespace gapi
|
||||
{
|
||||
GAPI_EXPORTS cv::GRunArgsP bind(cv::GRunArgs &results);
|
||||
GAPI_EXPORTS cv::GRunArg bind(cv::GRunArgP &out); // FIXME: think more about it
|
||||
}
|
||||
|
||||
template<typename... Ts> inline GRunArgs gin(const Ts&... args)
|
||||
{
|
||||
return GRunArgs{ GRunArg(detail::wrap_host_helper<Ts>::wrap_in(args))... };
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GARRAY_HPP
|
||||
@@ -29,7 +29,6 @@ namespace cv
|
||||
// (user-inaccessible) classes.
|
||||
class GNode;
|
||||
struct GOrigin;
|
||||
|
||||
template<typename T> class GArray;
|
||||
|
||||
/**
|
||||
@@ -81,6 +80,7 @@ namespace detail
|
||||
|
||||
protected:
|
||||
GArrayU(); // Default constructor
|
||||
GArrayU(const detail::VectorRef& vref); // Constant value constructor
|
||||
template<class> friend class cv::GArray; // (available to GArray<T> only)
|
||||
|
||||
void setConstructFcn(ConstructVec &&cv); // Store T-aware constructor
|
||||
@@ -88,6 +88,11 @@ namespace detail
|
||||
template <typename T>
|
||||
void specifyType(); // Store type of initial GArray<T>
|
||||
|
||||
template <typename T>
|
||||
void storeKind();
|
||||
|
||||
void setKind(cv::detail::OpaqueKind);
|
||||
|
||||
std::shared_ptr<GOrigin> m_priv;
|
||||
std::shared_ptr<TypeHintBase> m_hint;
|
||||
};
|
||||
@@ -104,6 +109,11 @@ namespace detail
|
||||
m_hint.reset(new TypeHint<typename std::decay<T>::type>);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
void GArrayU::storeKind(){
|
||||
setKind(cv::detail::GOpaqueTraits<T>::kind);
|
||||
};
|
||||
|
||||
// This class represents a typed STL vector reference.
|
||||
// Depending on origins, this reference may be either "just a" reference to
|
||||
// an object created externally, OR actually own the underlying object
|
||||
@@ -111,11 +121,14 @@ namespace detail
|
||||
class BasicVectorRef
|
||||
{
|
||||
public:
|
||||
// These fields are set by the derived class(es)
|
||||
std::size_t m_elemSize = 0ul;
|
||||
cv::GArrayDesc m_desc;
|
||||
virtual ~BasicVectorRef() {}
|
||||
|
||||
virtual void mov(BasicVectorRef &ref) = 0;
|
||||
virtual const void* ptr() const = 0;
|
||||
virtual std::size_t size() const = 0;
|
||||
};
|
||||
|
||||
template<typename T> class VectorRefT final: public BasicVectorRef
|
||||
@@ -208,6 +221,9 @@ namespace detail
|
||||
GAPI_Assert(tv != nullptr);
|
||||
wref() = std::move(tv->wref());
|
||||
}
|
||||
|
||||
virtual const void* ptr() const override { return &rref(); }
|
||||
virtual std::size_t size() const override { return rref().size(); }
|
||||
};
|
||||
|
||||
// This class strips type information from VectorRefT<> and makes it usable
|
||||
@@ -220,6 +236,7 @@ namespace detail
|
||||
class VectorRef
|
||||
{
|
||||
std::shared_ptr<BasicVectorRef> m_ref;
|
||||
cv::detail::OpaqueKind m_kind;
|
||||
|
||||
template<typename T> inline void check() const
|
||||
{
|
||||
@@ -229,18 +246,32 @@ namespace detail
|
||||
|
||||
public:
|
||||
VectorRef() = default;
|
||||
template<typename T> explicit VectorRef(const std::vector<T>& vec) : m_ref(new VectorRefT<T>(vec)) {}
|
||||
template<typename T> explicit VectorRef(std::vector<T>& vec) : m_ref(new VectorRefT<T>(vec)) {}
|
||||
template<typename T> explicit VectorRef(std::vector<T>&& vec) : m_ref(new VectorRefT<T>(vec)) {}
|
||||
template<typename T> explicit VectorRef(const std::vector<T>& vec) :
|
||||
m_ref(new VectorRefT<T>(vec)), m_kind(GOpaqueTraits<T>::kind) {}
|
||||
template<typename T> explicit VectorRef(std::vector<T>& vec) :
|
||||
m_ref(new VectorRefT<T>(vec)), m_kind(GOpaqueTraits<T>::kind) {}
|
||||
template<typename T> explicit VectorRef(std::vector<T>&& vec) :
|
||||
m_ref(new VectorRefT<T>(std::move(vec))), m_kind(GOpaqueTraits<T>::kind) {}
|
||||
|
||||
cv::detail::OpaqueKind getKind() const
|
||||
{
|
||||
return m_kind;
|
||||
}
|
||||
|
||||
template<typename T> void reset()
|
||||
{
|
||||
if (!m_ref) m_ref.reset(new VectorRefT<T>());
|
||||
|
||||
check<T>();
|
||||
storeKind<T>();
|
||||
static_cast<VectorRefT<T>&>(*m_ref).reset();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void storeKind()
|
||||
{
|
||||
m_kind = cv::detail::GOpaqueTraits<T>::kind;
|
||||
}
|
||||
|
||||
template<typename T> std::vector<T>& wref()
|
||||
{
|
||||
check<T>();
|
||||
@@ -253,6 +284,14 @@ namespace detail
|
||||
return static_cast<VectorRefT<T>&>(*m_ref).rref();
|
||||
}
|
||||
|
||||
// Check if was created for/from std::vector<T>
|
||||
template <typename T> bool holds() const
|
||||
{
|
||||
if (!m_ref) return false;
|
||||
using U = typename std::decay<T>::type;
|
||||
return dynamic_cast<VectorRefT<U>*>(m_ref.get()) != nullptr;
|
||||
}
|
||||
|
||||
void mov(VectorRef &v)
|
||||
{
|
||||
m_ref->mov(*v.m_ref);
|
||||
@@ -262,6 +301,14 @@ namespace detail
|
||||
{
|
||||
return m_ref->m_desc;
|
||||
}
|
||||
|
||||
std::size_t size() const
|
||||
{
|
||||
return m_ref->size();
|
||||
}
|
||||
|
||||
// May be used to uniquely identify this object internally
|
||||
const void *ptr() const { return m_ref->ptr(); }
|
||||
};
|
||||
|
||||
// Helper (FIXME: work-around?)
|
||||
@@ -290,23 +337,32 @@ namespace detail
|
||||
template<typename T> class GArray
|
||||
{
|
||||
public:
|
||||
GArray() { putDetails(); } // Empty constructor
|
||||
explicit GArray(detail::GArrayU &&ref) // GArrayU-based constructor
|
||||
: m_ref(ref) { putDetails(); } // (used by GCall, not for users)
|
||||
|
||||
detail::GArrayU strip() const { return m_ref; }
|
||||
|
||||
private:
|
||||
// Host type (or Flat type) - the type this GArray is actually
|
||||
// specified to.
|
||||
using HT = typename detail::flatten_g<typename std::decay<T>::type>::type;
|
||||
|
||||
static void VCTor(detail::VectorRef& vref) {
|
||||
explicit GArray(const std::vector<HT>& v) // Constant value constructor
|
||||
: m_ref(detail::GArrayU(detail::VectorRef(v))) { putDetails(); }
|
||||
explicit GArray(std::vector<HT>&& v) // Move-constructor
|
||||
: m_ref(detail::GArrayU(detail::VectorRef(std::move(v)))) { putDetails(); }
|
||||
GArray() { putDetails(); } // Empty constructor
|
||||
explicit GArray(detail::GArrayU &&ref) // GArrayU-based constructor
|
||||
: m_ref(ref) { putDetails(); } // (used by GCall, not for users)
|
||||
|
||||
/// @private
|
||||
detail::GArrayU strip() const {
|
||||
return m_ref;
|
||||
}
|
||||
/// @private
|
||||
static void VCtor(detail::VectorRef& vref) {
|
||||
vref.reset<HT>();
|
||||
}
|
||||
|
||||
private:
|
||||
void putDetails() {
|
||||
m_ref.setConstructFcn(&VCTor);
|
||||
m_ref.specifyType<HT>();
|
||||
m_ref.setConstructFcn(&VCtor);
|
||||
m_ref.specifyType<HT>(); // FIXME: to unify those 2 to avoid excessive dynamic_cast
|
||||
m_ref.storeKind<HT>(); //
|
||||
}
|
||||
|
||||
detail::GArrayU m_ref;
|
||||
|
||||
@@ -56,11 +56,16 @@ public:
|
||||
Priv& priv();
|
||||
const Priv& priv() const;
|
||||
|
||||
protected:
|
||||
std::shared_ptr<Priv> m_priv;
|
||||
// GKernel and params can be modified, it's needed for infer<Generic>,
|
||||
// because information about output shapes doesn't exist in compile time
|
||||
GKernel& kernel();
|
||||
cv::util::any& params();
|
||||
|
||||
void setArgs(std::vector<GArg> &&args);
|
||||
|
||||
protected:
|
||||
std::shared_ptr<Priv> m_priv;
|
||||
|
||||
// Public versions return a typed array or opaque, those are implementation details
|
||||
detail::GArrayU yieldArray(int output = 0);
|
||||
detail::GOpaqueU yieldOpaque(int output = 0);
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GCOMMON_HPP
|
||||
@@ -15,11 +15,16 @@
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
|
||||
#include <opencv2/gapi/util/any.hpp>
|
||||
#include <opencv2/gapi/util/optional.hpp>
|
||||
#include <opencv2/gapi/own/exports.hpp>
|
||||
#include <opencv2/gapi/own/assert.hpp>
|
||||
#include <opencv2/gapi/render/render_types.hpp>
|
||||
#include <opencv2/gapi/s11n/base.hpp>
|
||||
|
||||
namespace cv {
|
||||
|
||||
class GMat; // FIXME: forward declaration for GOpaqueTraits
|
||||
|
||||
namespace detail
|
||||
{
|
||||
// This is a trait-like structure to mark backend-specific compile arguments
|
||||
@@ -31,7 +36,49 @@ namespace detail
|
||||
{};
|
||||
struct TransformTag
|
||||
{};
|
||||
}
|
||||
|
||||
// This enum is utilized mostly by GArray and GOpaque to store and recognize their internal data
|
||||
// types (aka Host type). Also it is widely used during serialization routine.
|
||||
enum class OpaqueKind: int
|
||||
{
|
||||
CV_UNKNOWN, // Unknown, generic, opaque-to-GAPI data type unsupported in graph seriallization
|
||||
CV_BOOL, // bool user G-API data
|
||||
CV_INT, // int user G-API data
|
||||
CV_DOUBLE, // double user G-API data
|
||||
CV_FLOAT, // float user G-API data
|
||||
CV_UINT64, // uint64_t user G-API data
|
||||
CV_STRING, // std::string user G-API data
|
||||
CV_POINT, // cv::Point user G-API data
|
||||
CV_SIZE, // cv::Size user G-API data
|
||||
CV_RECT, // cv::Rect user G-API data
|
||||
CV_SCALAR, // cv::Scalar user G-API data
|
||||
CV_MAT, // cv::Mat user G-API data
|
||||
CV_DRAW_PRIM, // cv::gapi::wip::draw::Prim user G-API data
|
||||
};
|
||||
|
||||
// Type traits helper which simplifies the extraction of kind from type
|
||||
template<typename T> struct GOpaqueTraits;
|
||||
template<typename T> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_UNKNOWN; };
|
||||
template<> struct GOpaqueTraits<int> { static constexpr const OpaqueKind kind = OpaqueKind::CV_INT; };
|
||||
template<> struct GOpaqueTraits<double> { static constexpr const OpaqueKind kind = OpaqueKind::CV_DOUBLE; };
|
||||
template<> struct GOpaqueTraits<float> { static constexpr const OpaqueKind kind = OpaqueKind::CV_FLOAT; };
|
||||
template<> struct GOpaqueTraits<uint64_t> { static constexpr const OpaqueKind kind = OpaqueKind::CV_UINT64; };
|
||||
template<> struct GOpaqueTraits<bool> { static constexpr const OpaqueKind kind = OpaqueKind::CV_BOOL; };
|
||||
template<> struct GOpaqueTraits<std::string> { static constexpr const OpaqueKind kind = OpaqueKind::CV_STRING; };
|
||||
template<> struct GOpaqueTraits<cv::Size> { static constexpr const OpaqueKind kind = OpaqueKind::CV_SIZE; };
|
||||
template<> struct GOpaqueTraits<cv::Scalar> { static constexpr const OpaqueKind kind = OpaqueKind::CV_SCALAR; };
|
||||
template<> struct GOpaqueTraits<cv::Point> { static constexpr const OpaqueKind kind = OpaqueKind::CV_POINT; };
|
||||
template<> struct GOpaqueTraits<cv::Mat> { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; };
|
||||
template<> struct GOpaqueTraits<cv::Rect> { static constexpr const OpaqueKind kind = OpaqueKind::CV_RECT; };
|
||||
template<> struct GOpaqueTraits<cv::GMat> { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; };
|
||||
template<> struct GOpaqueTraits<cv::gapi::wip::draw::Prim>
|
||||
{ static constexpr const OpaqueKind kind = OpaqueKind::CV_DRAW_PRIM; };
|
||||
using GOpaqueTraitsArrayTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Scalar, cv::Point,
|
||||
cv::Mat, cv::Rect, cv::gapi::wip::draw::Prim>;
|
||||
// GOpaque is not supporting cv::Mat and cv::Scalar since there are GScalar and GMat types
|
||||
using GOpaqueTraitsOpaqueTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Point, cv::Rect,
|
||||
cv::gapi::wip::draw::Prim>;
|
||||
} // namespace detail
|
||||
|
||||
// This definition is here because it is reused by both public(?) and internal
|
||||
// modules. Keeping it here wouldn't expose public details (e.g., API-level)
|
||||
@@ -45,14 +92,25 @@ enum class GShape: int
|
||||
GSCALAR,
|
||||
GARRAY,
|
||||
GOPAQUE,
|
||||
GFRAME,
|
||||
};
|
||||
|
||||
namespace gapi {
|
||||
namespace s11n {
|
||||
namespace detail {
|
||||
template<typename T> struct wrap_serialize;
|
||||
} // namespace detail
|
||||
} // namespace s11n
|
||||
} // namespace gapi
|
||||
|
||||
|
||||
struct GCompileArg;
|
||||
|
||||
namespace detail {
|
||||
template<typename T>
|
||||
using is_compile_arg = std::is_same<GCompileArg, typename std::decay<T>::type>;
|
||||
}
|
||||
} // namespace detail
|
||||
|
||||
// CompileArg is an unified interface over backend-specific compilation
|
||||
// information
|
||||
// FIXME: Move to a separate file?
|
||||
@@ -91,15 +149,21 @@ namespace detail {
|
||||
* passed in (a variadic template parameter pack) into a vector of
|
||||
* cv::GCompileArg objects.
|
||||
*/
|
||||
struct GAPI_EXPORTS GCompileArg
|
||||
struct GCompileArg
|
||||
{
|
||||
public:
|
||||
// NB: Required for pythnon bindings
|
||||
GCompileArg() = default;
|
||||
|
||||
std::string tag;
|
||||
|
||||
// FIXME: use decay in GArg/other trait-based wrapper before leg is shot!
|
||||
template<typename T, typename std::enable_if<!detail::is_compile_arg<T>::value, int>::type = 0>
|
||||
explicit GCompileArg(T &&t)
|
||||
: tag(detail::CompileArgTag<typename std::decay<T>::type>::tag())
|
||||
, serializeF(cv::gapi::s11n::detail::has_S11N_spec<T>::value ?
|
||||
&cv::gapi::s11n::detail::wrap_serialize<T>::serialize :
|
||||
nullptr)
|
||||
, arg(t)
|
||||
{
|
||||
}
|
||||
@@ -114,21 +178,63 @@ public:
|
||||
return util::any_cast<T>(arg);
|
||||
}
|
||||
|
||||
void serialize(cv::gapi::s11n::IOStream& os) const
|
||||
{
|
||||
if (serializeF)
|
||||
{
|
||||
serializeF(os, *this);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
std::function<void(cv::gapi::s11n::IOStream&, const GCompileArg&)> serializeF;
|
||||
util::any arg;
|
||||
};
|
||||
|
||||
using GCompileArgs = std::vector<GCompileArg>;
|
||||
|
||||
/**
|
||||
* Wraps a list of arguments (a parameter pack) into a vector of
|
||||
* compilation arguments (cv::GCompileArg).
|
||||
* @brief Wraps a list of arguments (a parameter pack) into a vector of
|
||||
* compilation arguments (cv::GCompileArg).
|
||||
*/
|
||||
template<typename... Ts> GCompileArgs compile_args(Ts&&... args)
|
||||
{
|
||||
return GCompileArgs{ GCompileArg(args)... };
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Retrieves particular compilation argument by its type from
|
||||
* cv::GCompileArgs
|
||||
*/
|
||||
namespace gapi
|
||||
{
|
||||
template<typename T>
|
||||
inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
|
||||
{
|
||||
for (auto &compile_arg : args)
|
||||
{
|
||||
if (compile_arg.tag == cv::detail::CompileArgTag<T>::tag())
|
||||
{
|
||||
return cv::util::optional<T>(compile_arg.get<T>());
|
||||
}
|
||||
}
|
||||
return cv::util::optional<T>();
|
||||
}
|
||||
|
||||
namespace s11n {
|
||||
namespace detail {
|
||||
template<typename T> struct wrap_serialize
|
||||
{
|
||||
static void serialize(IOStream& os, const GCompileArg& arg)
|
||||
{
|
||||
using DT = typename std::decay<T>::type;
|
||||
S11N<DT>::serialize(os, arg.get<DT>());
|
||||
}
|
||||
};
|
||||
} // namespace detail
|
||||
} // namespace s11n
|
||||
} // namespace gapi
|
||||
|
||||
/**
|
||||
* @brief Ask G-API to dump compiled graph in Graphviz format under
|
||||
* the given file name.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GCOMPILED_HPP
|
||||
@@ -208,6 +208,19 @@ public:
|
||||
// FIXME: Why it requires compile args?
|
||||
void reshape(const GMetaArgs& inMetas, const GCompileArgs& args);
|
||||
|
||||
/**
|
||||
* @brief Prepare inner kernels states for a new video-stream.
|
||||
*
|
||||
* GCompiled objects may be used to process video streams frame by frame.
|
||||
* In this case, a GCompiled is called on every image frame individually.
|
||||
* Starting OpenCV 4.4, some kernels in the graph may have their internal
|
||||
* states (see GAPI_OCV_KERNEL_ST for the OpenCV backend).
|
||||
* In this case, if user starts processing another video stream with
|
||||
* this GCompiled, this method needs to be called to let kernels re-initialize
|
||||
* their internal states to a new video stream.
|
||||
*/
|
||||
void prepareForNewStream();
|
||||
|
||||
protected:
|
||||
/// @private
|
||||
std::shared_ptr<Priv> m_priv;
|
||||
|
||||
@@ -75,6 +75,16 @@ template<typename U> struct get_compound_in<cv::GOpaque<U>>
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct get_compound_in<cv::GMatP>
|
||||
{
|
||||
static cv::GMatP get(GCompoundContext &ctx, int idx)
|
||||
{
|
||||
auto mat = cv::GMatP();
|
||||
ctx.m_args[idx] = GArg(mat);
|
||||
return mat;
|
||||
}
|
||||
};
|
||||
|
||||
template<typename, typename, typename>
|
||||
struct GCompoundCallHelper;
|
||||
|
||||
|
||||
@@ -36,6 +36,14 @@ namespace detail
|
||||
using last_type_t = typename last_type<Ts...>::type;
|
||||
}
|
||||
|
||||
// Forward-declare the serialization objects
|
||||
namespace gapi {
|
||||
namespace s11n {
|
||||
struct IIStream;
|
||||
struct IOStream;
|
||||
} // namespace s11n
|
||||
} // namespace gapi
|
||||
|
||||
/**
|
||||
* \addtogroup gapi_main_classes
|
||||
* @{
|
||||
@@ -108,7 +116,7 @@ namespace detail
|
||||
*
|
||||
* @sa GCompiled
|
||||
*/
|
||||
class GAPI_EXPORTS GComputation
|
||||
class GAPI_EXPORTS_W GComputation
|
||||
{
|
||||
public:
|
||||
class Priv;
|
||||
@@ -151,8 +159,8 @@ public:
|
||||
*
|
||||
* @sa @ref gapi_data_objects
|
||||
*/
|
||||
GComputation(GProtoInputArgs &&ins,
|
||||
GProtoOutputArgs &&outs); // Arg-to-arg overload
|
||||
GAPI_WRAP GComputation(GProtoInputArgs &&ins,
|
||||
GProtoOutputArgs &&outs); // Arg-to-arg overload
|
||||
|
||||
// 2. Syntax sugar and compatibility overloads
|
||||
/**
|
||||
@@ -162,7 +170,7 @@ public:
|
||||
* @param in input GMat of the defined unary computation
|
||||
* @param out output GMat of the defined unary computation
|
||||
*/
|
||||
GComputation(GMat in, GMat out); // Unary overload
|
||||
GAPI_WRAP GComputation(GMat in, GMat out); // Unary overload
|
||||
|
||||
/**
|
||||
* @brief Defines an unary (one input -- one output) computation
|
||||
@@ -171,7 +179,7 @@ public:
|
||||
* @param in input GMat of the defined unary computation
|
||||
* @param out output GScalar of the defined unary computation
|
||||
*/
|
||||
GComputation(GMat in, GScalar out); // Unary overload (scalar)
|
||||
GAPI_WRAP GComputation(GMat in, GScalar out); // Unary overload (scalar)
|
||||
|
||||
/**
|
||||
* @brief Defines a binary (two inputs -- one output) computation
|
||||
@@ -181,7 +189,7 @@ public:
|
||||
* @param in2 second input GMat of the defined binary computation
|
||||
* @param out output GMat of the defined binary computation
|
||||
*/
|
||||
GComputation(GMat in1, GMat in2, GMat out); // Binary overload
|
||||
GAPI_WRAP GComputation(GMat in1, GMat in2, GMat out); // Binary overload
|
||||
|
||||
/**
|
||||
* @brief Defines a binary (two inputs -- one output) computation
|
||||
@@ -250,8 +258,11 @@ public:
|
||||
void apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args = {}); // Arg-to-arg overload
|
||||
|
||||
/// @private -- Exclude this function from OpenCV documentation
|
||||
void apply(const std::vector<cv::gapi::own::Mat>& ins, // Compatibility overload
|
||||
const std::vector<cv::gapi::own::Mat>& outs,
|
||||
GAPI_WRAP GRunArgs apply(GRunArgs &&ins, GCompileArgs &&args = {});
|
||||
|
||||
/// @private -- Exclude this function from OpenCV documentation
|
||||
void apply(const std::vector<cv::Mat>& ins, // Compatibility overload
|
||||
const std::vector<cv::Mat>& outs,
|
||||
GCompileArgs &&args = {});
|
||||
|
||||
// 2. Syntax sugar and compatibility overloads
|
||||
@@ -265,7 +276,7 @@ public:
|
||||
* @param args compilation arguments for underlying compilation
|
||||
* process.
|
||||
*/
|
||||
void apply(cv::Mat in, cv::Mat &out, GCompileArgs &&args = {}); // Unary overload
|
||||
void apply(cv::Mat in, cv::Mat &out, GCompileArgs &&args = {}); // Unary overload
|
||||
|
||||
/**
|
||||
* @brief Execute an unary computation (with compilation on the fly)
|
||||
@@ -276,7 +287,7 @@ public:
|
||||
* @param args compilation arguments for underlying compilation
|
||||
* process.
|
||||
*/
|
||||
void apply(cv::Mat in, cv::Scalar &out, GCompileArgs &&args = {}); // Unary overload (scalar)
|
||||
void apply(cv::Mat in, cv::Scalar &out, GCompileArgs &&args = {}); // Unary overload (scalar)
|
||||
|
||||
/**
|
||||
* @brief Execute a binary computation (with compilation on the fly)
|
||||
@@ -425,7 +436,7 @@ public:
|
||||
*
|
||||
* @sa @ref gapi_compile_args
|
||||
*/
|
||||
GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
|
||||
GAPI_WRAP GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
|
||||
|
||||
/**
|
||||
* @brief Compile the computation for streaming mode.
|
||||
@@ -446,7 +457,7 @@ public:
|
||||
*
|
||||
* @sa @ref gapi_compile_args
|
||||
*/
|
||||
GStreamingCompiled compileStreaming(GCompileArgs &&args = {});
|
||||
GAPI_WRAP GStreamingCompiled compileStreaming(GCompileArgs &&args = {});
|
||||
|
||||
// 2. Direct metadata version
|
||||
/**
|
||||
@@ -495,6 +506,10 @@ public:
|
||||
Priv& priv();
|
||||
/// @private
|
||||
const Priv& priv() const;
|
||||
/// @private
|
||||
explicit GComputation(cv::gapi::s11n::IIStream &);
|
||||
/// @private
|
||||
void serialize(cv::gapi::s11n::IOStream &) const;
|
||||
|
||||
protected:
|
||||
|
||||
@@ -514,6 +529,7 @@ protected:
|
||||
GCompileArgs comp_args = std::get<sizeof...(Ts)-1>(meta_and_compile_args);
|
||||
return compileStreaming(std::move(meta_args), std::move(comp_args));
|
||||
}
|
||||
void recompile(GMetaArgs&& in_metas, GCompileArgs &&args);
|
||||
/// @private
|
||||
std::shared_ptr<Priv> m_priv;
|
||||
};
|
||||
|
||||
69
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gframe.hpp
vendored
Normal file
69
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gframe.hpp
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GFRAME_HPP
|
||||
#define OPENCV_GAPI_GFRAME_HPP
|
||||
|
||||
#include <ostream>
|
||||
#include <memory> // std::shared_ptr
|
||||
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
#include <opencv2/gapi/gcommon.hpp> // GShape
|
||||
|
||||
#include <opencv2/gapi/gmat.hpp>
|
||||
#include <opencv2/gapi/own/assert.hpp>
|
||||
|
||||
// TODO GAPI_EXPORTS or so
|
||||
namespace cv
|
||||
{
|
||||
// Forward declaration; GNode and GOrigin are an internal
|
||||
// (user-inaccessible) classes.
|
||||
class GNode;
|
||||
struct GOrigin;
|
||||
|
||||
/** \addtogroup gapi_data_objects
|
||||
* @{
|
||||
*/
|
||||
class GAPI_EXPORTS_W_SIMPLE GFrame
|
||||
{
|
||||
public:
|
||||
GAPI_WRAP GFrame(); // Empty constructor
|
||||
GFrame(const GNode &n, std::size_t out); // Operation result constructor
|
||||
|
||||
GOrigin& priv(); // Internal use only
|
||||
const GOrigin& priv() const; // Internal use only
|
||||
|
||||
private:
|
||||
std::shared_ptr<GOrigin> m_priv;
|
||||
};
|
||||
/** @} */
|
||||
|
||||
enum class MediaFormat: int
|
||||
{
|
||||
BGR = 0,
|
||||
NV12,
|
||||
};
|
||||
|
||||
/**
|
||||
* \addtogroup gapi_meta_args
|
||||
* @{
|
||||
*/
|
||||
struct GAPI_EXPORTS GFrameDesc
|
||||
{
|
||||
MediaFormat fmt;
|
||||
cv::Size size;
|
||||
|
||||
bool operator== (const GFrameDesc &) const;
|
||||
};
|
||||
static inline GFrameDesc empty_gframe_desc() { return GFrameDesc{}; }
|
||||
/** @} */
|
||||
|
||||
GAPI_EXPORTS std::ostream& operator<<(std::ostream& os, const cv::GFrameDesc &desc);
|
||||
|
||||
} // namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_GFRAME_HPP
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018-2019 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GKERNEL_HPP
|
||||
@@ -27,6 +27,8 @@
|
||||
namespace cv {
|
||||
|
||||
using GShapes = std::vector<GShape>;
|
||||
using GKinds = std::vector<cv::detail::OpaqueKind>;
|
||||
using GCtors = std::vector<detail::HostCtor>;
|
||||
|
||||
// GKernel describes kernel API to the system
|
||||
// FIXME: add attributes of a kernel, (e.g. number and types
|
||||
@@ -35,16 +37,21 @@ struct GAPI_EXPORTS GKernel
|
||||
{
|
||||
using M = std::function<GMetaArgs(const GMetaArgs &, const GArgs &)>;
|
||||
|
||||
const std::string name; // kernel ID, defined by its API (signature)
|
||||
const std::string tag; // some (implementation-specific) tag
|
||||
const M outMeta; // generic adaptor to API::outMeta(...)
|
||||
const GShapes outShapes; // types (shapes) kernel's outputs
|
||||
std::string name; // kernel ID, defined by its API (signature)
|
||||
std::string tag; // some (implementation-specific) tag
|
||||
M outMeta; // generic adaptor to API::outMeta(...)
|
||||
GShapes outShapes; // types (shapes) kernel's outputs
|
||||
GKinds inKinds; // kinds of kernel's inputs (fixme: below)
|
||||
GCtors outCtors; // captured constructors for template output types
|
||||
};
|
||||
// TODO: It's questionable if inKinds should really be here. Instead,
|
||||
// this information could come from meta.
|
||||
|
||||
// GKernelImpl describes particular kernel implementation to the system
|
||||
struct GAPI_EXPORTS GKernelImpl
|
||||
{
|
||||
util::any opaque; // backend-specific opaque info
|
||||
GKernel::M outMeta; // for deserialized graphs, the outMeta is taken here
|
||||
};
|
||||
|
||||
template<typename, typename> class GKernelTypeM;
|
||||
@@ -55,30 +62,27 @@ namespace detail
|
||||
// yield() is used in graph construction time as a generic method to obtain
|
||||
// lazy "return value" of G-API operations
|
||||
//
|
||||
namespace
|
||||
template<typename T> struct Yield;
|
||||
template<> struct Yield<cv::GMat>
|
||||
{
|
||||
template<typename T> struct Yield;
|
||||
template<> struct Yield<cv::GMat>
|
||||
{
|
||||
static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); }
|
||||
};
|
||||
template<> struct Yield<cv::GMatP>
|
||||
{
|
||||
static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); }
|
||||
};
|
||||
template<> struct Yield<cv::GScalar>
|
||||
{
|
||||
static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); }
|
||||
};
|
||||
template<typename U> struct Yield<cv::GArray<U> >
|
||||
{
|
||||
static inline cv::GArray<U> yield(cv::GCall &call, int i) { return call.yieldArray<U>(i); }
|
||||
};
|
||||
template<typename U> struct Yield<cv::GOpaque<U> >
|
||||
{
|
||||
static inline cv::GOpaque<U> yield(cv::GCall &call, int i) { return call.yieldOpaque<U>(i); }
|
||||
};
|
||||
} // anonymous namespace
|
||||
static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); }
|
||||
};
|
||||
template<> struct Yield<cv::GMatP>
|
||||
{
|
||||
static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); }
|
||||
};
|
||||
template<> struct Yield<cv::GScalar>
|
||||
{
|
||||
static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); }
|
||||
};
|
||||
template<typename U> struct Yield<cv::GArray<U> >
|
||||
{
|
||||
static inline cv::GArray<U> yield(cv::GCall &call, int i) { return call.yieldArray<U>(i); }
|
||||
};
|
||||
template<typename U> struct Yield<cv::GOpaque<U> >
|
||||
{
|
||||
static inline cv::GOpaque<U> yield(cv::GCall &call, int i) { return call.yieldOpaque<U>(i); }
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Helper classes which brings outputMeta() marshalling to kernel
|
||||
@@ -90,10 +94,12 @@ namespace detail
|
||||
template<typename T> struct MetaType;
|
||||
template<> struct MetaType<cv::GMat> { using type = GMatDesc; };
|
||||
template<> struct MetaType<cv::GMatP> { using type = GMatDesc; };
|
||||
template<> struct MetaType<cv::GFrame> { using type = GFrameDesc; };
|
||||
template<> struct MetaType<cv::GScalar> { using type = GScalarDesc; };
|
||||
template<typename U> struct MetaType<cv::GArray<U> > { using type = GArrayDesc; };
|
||||
template<typename U> struct MetaType<cv::GOpaque<U> > { using type = GOpaqueDesc; };
|
||||
template<typename T> struct MetaType { using type = T; }; // opaque args passed as-is
|
||||
// FIXME: Move it to type traits?
|
||||
|
||||
// 2. Hacky test based on MetaType to check if we operate on G-* type or not
|
||||
template<typename T> using is_nongapi_type = std::is_same<T, typename MetaType<T>::type>;
|
||||
@@ -201,10 +207,16 @@ public:
|
||||
using InArgs = std::tuple<Args...>;
|
||||
using OutArgs = std::tuple<R...>;
|
||||
|
||||
// TODO: Args&&... here?
|
||||
static std::tuple<R...> on(Args... args)
|
||||
{
|
||||
cv::GCall call(GKernel{K::id(), K::tag(), &K::getOutMeta, {detail::GTypeTraits<R>::shape...}});
|
||||
call.pass(args...);
|
||||
cv::GCall call(GKernel{ K::id()
|
||||
, K::tag()
|
||||
, &K::getOutMeta
|
||||
, {detail::GTypeTraits<R>::shape...}
|
||||
, {detail::GTypeTraits<Args>::op_kind...}
|
||||
, {detail::GObtainCtor<R>::get()...}});
|
||||
call.pass(args...); // TODO: std::forward() here?
|
||||
return yield(call, typename detail::MkSeq<sizeof...(R)>::type());
|
||||
}
|
||||
};
|
||||
@@ -220,9 +232,16 @@ public:
|
||||
using InArgs = std::tuple<Args...>;
|
||||
using OutArgs = std::tuple<R>;
|
||||
|
||||
static_assert(!cv::detail::contains<GFrame, OutArgs>::value, "Values of GFrame type can't be used as operation outputs");
|
||||
|
||||
static R on(Args... args)
|
||||
{
|
||||
cv::GCall call(GKernel{K::id(), K::tag(), &K::getOutMeta, {detail::GTypeTraits<R>::shape}});
|
||||
cv::GCall call(GKernel{ K::id()
|
||||
, K::tag()
|
||||
, &K::getOutMeta
|
||||
, {detail::GTypeTraits<R>::shape}
|
||||
, {detail::GTypeTraits<Args>::op_kind...}
|
||||
, {detail::GObtainCtor<R>::get()}});
|
||||
call.pass(args...);
|
||||
return detail::Yield<R>::yield(call, 0);
|
||||
}
|
||||
@@ -428,7 +447,7 @@ namespace gapi {
|
||||
* Finally, two kernel packages can be combined into a new one
|
||||
* with function cv::gapi::combine().
|
||||
*/
|
||||
class GAPI_EXPORTS GKernelPackage
|
||||
class GAPI_EXPORTS_W_SIMPLE GKernelPackage
|
||||
{
|
||||
|
||||
/// @private
|
||||
@@ -441,11 +460,6 @@ namespace gapi {
|
||||
std::vector<GTransform> m_transformations;
|
||||
|
||||
protected:
|
||||
/// @private
|
||||
// Check if package contains ANY implementation of a kernel API
|
||||
// by API textual id.
|
||||
bool includesAPI(const std::string &id) const;
|
||||
|
||||
/// @private
|
||||
// Remove ALL implementations of the given API (identified by ID)
|
||||
void removeAPI(const std::string &id);
|
||||
@@ -453,12 +467,12 @@ namespace gapi {
|
||||
/// @private
|
||||
// Partial include() specialization for kernels
|
||||
template <typename KImpl>
|
||||
typename std::enable_if<(std::is_base_of<detail::KernelTag, KImpl>::value), void>::type
|
||||
typename std::enable_if<(std::is_base_of<cv::detail::KernelTag, KImpl>::value), void>::type
|
||||
includeHelper()
|
||||
{
|
||||
auto backend = KImpl::backend();
|
||||
auto kernel_id = KImpl::API::id();
|
||||
auto kernel_impl = GKernelImpl{KImpl::kernel()};
|
||||
auto kernel_impl = GKernelImpl{KImpl::kernel(), &KImpl::API::getOutMeta};
|
||||
removeAPI(kernel_id);
|
||||
|
||||
m_id_kernels[kernel_id] = std::make_pair(backend, kernel_impl);
|
||||
@@ -467,7 +481,7 @@ namespace gapi {
|
||||
/// @private
|
||||
// Partial include() specialization for transformations
|
||||
template <typename TImpl>
|
||||
typename std::enable_if<(std::is_base_of<detail::TransformTag, TImpl>::value), void>::type
|
||||
typename std::enable_if<(std::is_base_of<cv::detail::TransformTag, TImpl>::value), void>::type
|
||||
includeHelper()
|
||||
{
|
||||
m_transformations.emplace_back(TImpl::transformation());
|
||||
@@ -506,7 +520,7 @@ namespace gapi {
|
||||
template<typename KImpl>
|
||||
bool includes() const
|
||||
{
|
||||
static_assert(std::is_base_of<detail::KernelTag, KImpl>::value,
|
||||
static_assert(std::is_base_of<cv::detail::KernelTag, KImpl>::value,
|
||||
"includes() can be applied to kernels only");
|
||||
|
||||
auto kernel_it = m_id_kernels.find(KImpl::API::id());
|
||||
@@ -548,6 +562,9 @@ namespace gapi {
|
||||
return includesAPI(KAPI::id());
|
||||
}
|
||||
|
||||
/// @private
|
||||
bool includesAPI(const std::string &id) const;
|
||||
|
||||
// FIXME: The below comment is wrong, and who needs this function?
|
||||
/**
|
||||
* @brief Find a kernel (by its API)
|
||||
@@ -621,7 +638,7 @@ namespace gapi {
|
||||
{
|
||||
// FIXME: currently there is no check that transformations' signatures are unique
|
||||
// and won't be any intersection in graph compilation stage
|
||||
static_assert(detail::all_unique<typename KK::API...>::value, "Kernels API must be unique");
|
||||
static_assert(cv::detail::all_unique<typename KK::API...>::value, "Kernels API must be unique");
|
||||
|
||||
GKernelPackage pkg;
|
||||
|
||||
@@ -695,6 +712,7 @@ namespace detail
|
||||
static const char* tag() { return "gapi.use_only"; }
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
} // namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_GKERNEL_HPP
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GMAT_HPP
|
||||
@@ -46,10 +46,10 @@ struct GOrigin;
|
||||
* `cv::GArray<T>` | std::vector<T>
|
||||
* `cv::GOpaque<T>` | T
|
||||
*/
|
||||
class GAPI_EXPORTS GMat
|
||||
class GAPI_EXPORTS_W_SIMPLE GMat
|
||||
{
|
||||
public:
|
||||
GMat(); // Empty constructor
|
||||
GAPI_WRAP GMat(); // Empty constructor
|
||||
GMat(const GNode &n, std::size_t out); // Operation result constructor
|
||||
|
||||
GOrigin& priv(); // Internal use only
|
||||
@@ -65,9 +65,7 @@ public:
|
||||
using GMat::GMat;
|
||||
};
|
||||
|
||||
namespace gapi { namespace own {
|
||||
class Mat;
|
||||
}}//gapi::own
|
||||
class RMat;
|
||||
|
||||
/** @} */
|
||||
|
||||
@@ -115,7 +113,9 @@ struct GAPI_EXPORTS GMatDesc
|
||||
// (it handles the case when
|
||||
// 1-channel mat can be reinterpreted as is (1-channel mat)
|
||||
// and as a 3-channel planar mat with height divided by 3)
|
||||
bool canDescribe(const cv::gapi::own::Mat& mat) const;
|
||||
bool canDescribe(const cv::Mat& mat) const;
|
||||
|
||||
bool canDescribe(const cv::RMat& mat) const;
|
||||
|
||||
// Meta combinator: return a new GMatDesc which differs in size by delta
|
||||
// (all other fields are taken unchanged from this GMatDesc)
|
||||
@@ -126,9 +126,6 @@ struct GAPI_EXPORTS GMatDesc
|
||||
desc.size += delta;
|
||||
return desc;
|
||||
}
|
||||
#if !defined(GAPI_STANDALONE)
|
||||
bool canDescribe(const cv::Mat& mat) const;
|
||||
#endif // !defined(GAPI_STANDALONE)
|
||||
// Meta combinator: return a new GMatDesc which differs in size by delta
|
||||
// (all other fields are taken unchanged from this GMatDesc)
|
||||
//
|
||||
@@ -207,18 +204,25 @@ struct GAPI_EXPORTS GMatDesc
|
||||
static inline GMatDesc empty_gmat_desc() { return GMatDesc{-1,-1,{-1,-1}}; }
|
||||
|
||||
#if !defined(GAPI_STANDALONE)
|
||||
class Mat;
|
||||
GAPI_EXPORTS GMatDesc descr_of(const cv::Mat &mat);
|
||||
GAPI_EXPORTS GMatDesc descr_of(const cv::UMat &mat);
|
||||
#endif // !defined(GAPI_STANDALONE)
|
||||
|
||||
/** @} */
|
||||
|
||||
// FIXME: WHY??? WHY it is under different namespace?
|
||||
//Fwd declarations
|
||||
namespace gapi { namespace own {
|
||||
class Mat;
|
||||
GAPI_EXPORTS GMatDesc descr_of(const Mat &mat);
|
||||
}}//gapi::own
|
||||
|
||||
GAPI_EXPORTS GMatDesc descr_of(const RMat &mat);
|
||||
|
||||
#if !defined(GAPI_STANDALONE)
|
||||
GAPI_EXPORTS GMatDesc descr_of(const cv::Mat &mat);
|
||||
#else
|
||||
using gapi::own::descr_of;
|
||||
#endif
|
||||
|
||||
/** @} */
|
||||
|
||||
GAPI_EXPORTS std::ostream& operator<<(std::ostream& os, const cv::GMatDesc &desc);
|
||||
|
||||
} // namespace cv
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#include <opencv2/gapi/gscalar.hpp>
|
||||
#include <opencv2/gapi/garray.hpp>
|
||||
#include <opencv2/gapi/gopaque.hpp>
|
||||
#include <opencv2/gapi/gframe.hpp>
|
||||
|
||||
namespace cv
|
||||
{
|
||||
@@ -38,6 +39,7 @@ using GMetaArg = util::variant
|
||||
, GScalarDesc
|
||||
, GArrayDesc
|
||||
, GOpaqueDesc
|
||||
, GFrameDesc
|
||||
>;
|
||||
GAPI_EXPORTS std::ostream& operator<<(std::ostream& os, const GMetaArg &);
|
||||
|
||||
@@ -66,12 +68,10 @@ namespace detail
|
||||
|
||||
// Note: descr_of(std::vector<..>) returns a GArrayDesc, while
|
||||
// descrs_of(std::vector<..>) returns an array of Meta args!
|
||||
class Mat;
|
||||
class UMat;
|
||||
GAPI_EXPORTS cv::GMetaArgs descrs_of(const std::vector<cv::Mat> &vec);
|
||||
GAPI_EXPORTS cv::GMetaArgs descrs_of(const std::vector<cv::UMat> &vec);
|
||||
namespace gapi { namespace own {
|
||||
class Mat;
|
||||
GAPI_EXPORTS cv::GMetaArgs descrs_of(const std::vector<Mat> &vec);
|
||||
}} // namespace gapi::own
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2019 Intel Corporation
|
||||
// Copyright (C) 2019-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GOPAQUE_HPP
|
||||
@@ -15,8 +15,10 @@
|
||||
#include <opencv2/gapi/own/exports.hpp>
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
|
||||
#include <opencv2/gapi/util/any.hpp>
|
||||
#include <opencv2/gapi/util/variant.hpp>
|
||||
#include <opencv2/gapi/util/throw.hpp>
|
||||
#include <opencv2/gapi/util/type_traits.hpp>
|
||||
#include <opencv2/gapi/own/assert.hpp>
|
||||
|
||||
namespace cv
|
||||
@@ -25,7 +27,6 @@ namespace cv
|
||||
// (user-inaccessible) classes.
|
||||
class GNode;
|
||||
struct GOrigin;
|
||||
|
||||
template<typename T> class GOpaque;
|
||||
|
||||
/**
|
||||
@@ -80,6 +81,11 @@ namespace detail
|
||||
template <typename T>
|
||||
void specifyType(); // Store type of initial GOpaque<T>
|
||||
|
||||
template <typename T>
|
||||
void storeKind();
|
||||
|
||||
void setKind(cv::detail::OpaqueKind);
|
||||
|
||||
std::shared_ptr<GOrigin> m_priv;
|
||||
std::shared_ptr<TypeHintBase> m_hint;
|
||||
};
|
||||
@@ -87,13 +93,19 @@ namespace detail
|
||||
template <typename T>
|
||||
bool GOpaqueU::holds() const{
|
||||
GAPI_Assert(m_hint != nullptr);
|
||||
using U = typename std::decay<T>::type;
|
||||
using U = util::decay_t<T>;
|
||||
return dynamic_cast<TypeHint<U>*>(m_hint.get()) != nullptr;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
void GOpaqueU::specifyType(){
|
||||
m_hint.reset(new TypeHint<typename std::decay<T>::type>);
|
||||
m_hint.reset(new TypeHint<util::decay_t<T>>);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
void GOpaqueU::storeKind(){
|
||||
// FIXME: Add assert here on cv::Mat and cv::Scalar?
|
||||
setKind(cv::detail::GOpaqueTraits<T>::kind);
|
||||
};
|
||||
|
||||
// This class represents a typed object reference.
|
||||
@@ -107,6 +119,8 @@ namespace detail
|
||||
virtual ~BasicOpaqueRef() {}
|
||||
|
||||
virtual void mov(BasicOpaqueRef &ref) = 0;
|
||||
virtual const void* ptr() const = 0;
|
||||
virtual void set(const cv::util::any &a) = 0;
|
||||
};
|
||||
|
||||
template<typename T> class OpaqueRefT final: public BasicOpaqueRef
|
||||
@@ -198,6 +212,12 @@ namespace detail
|
||||
GAPI_Assert(tv != nullptr);
|
||||
wref() = std::move(tv->wref());
|
||||
}
|
||||
|
||||
virtual const void* ptr() const override { return &rref(); }
|
||||
|
||||
virtual void set(const cv::util::any &a) override {
|
||||
wref() = util::any_cast<T>(a);
|
||||
}
|
||||
};
|
||||
|
||||
// This class strips type information from OpaqueRefT<> and makes it usable
|
||||
@@ -209,6 +229,7 @@ namespace detail
|
||||
class OpaqueRef
|
||||
{
|
||||
std::shared_ptr<BasicOpaqueRef> m_ref;
|
||||
cv::detail::OpaqueKind m_kind;
|
||||
|
||||
template<typename T> inline void check() const
|
||||
{
|
||||
@@ -218,17 +239,34 @@ namespace detail
|
||||
public:
|
||||
OpaqueRef() = default;
|
||||
|
||||
template<typename T> explicit OpaqueRef(T&& obj) :
|
||||
m_ref(new OpaqueRefT<typename std::decay<T>::type>(std::forward<T>(obj))) {}
|
||||
template<
|
||||
typename T,
|
||||
typename = util::are_different_t<OpaqueRef, T>
|
||||
>
|
||||
// FIXME: probably won't work with const object
|
||||
explicit OpaqueRef(T&& obj) :
|
||||
m_ref(new OpaqueRefT<util::decay_t<T>>(std::forward<T>(obj))),
|
||||
m_kind(GOpaqueTraits<util::decay_t<T>>::kind) {}
|
||||
|
||||
cv::detail::OpaqueKind getKind() const
|
||||
{
|
||||
return m_kind;
|
||||
}
|
||||
|
||||
template<typename T> void reset()
|
||||
{
|
||||
if (!m_ref) m_ref.reset(new OpaqueRefT<T>());
|
||||
|
||||
check<T>();
|
||||
storeKind<T>();
|
||||
static_cast<OpaqueRefT<T>&>(*m_ref).reset();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void storeKind()
|
||||
{
|
||||
m_kind = cv::detail::GOpaqueTraits<T>::kind;
|
||||
}
|
||||
|
||||
template<typename T> T& wref()
|
||||
{
|
||||
check<T>();
|
||||
@@ -250,6 +288,16 @@ namespace detail
|
||||
{
|
||||
return m_ref->m_desc;
|
||||
}
|
||||
|
||||
// May be used to uniquely identify this object internally
|
||||
const void *ptr() const { return m_ref->ptr(); }
|
||||
|
||||
// Introduced for in-graph meta handling
|
||||
OpaqueRef& operator= (const cv::util::any &a)
|
||||
{
|
||||
m_ref->set(a);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
@@ -260,23 +308,27 @@ namespace detail
|
||||
template<typename T> class GOpaque
|
||||
{
|
||||
public:
|
||||
// Host type (or Flat type) - the type this GOpaque is actually
|
||||
// specified to.
|
||||
using HT = typename detail::flatten_g<util::decay_t<T>>::type;
|
||||
|
||||
GOpaque() { putDetails(); } // Empty constructor
|
||||
explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor
|
||||
: m_ref(ref) { putDetails(); } // (used by GCall, not for users)
|
||||
|
||||
detail::GOpaqueU strip() const { return m_ref; }
|
||||
|
||||
private:
|
||||
// Host type (or Flat type) - the type this GOpaque is actually
|
||||
// specified to.
|
||||
using HT = typename detail::flatten_g<typename std::decay<T>::type>::type;
|
||||
|
||||
static void CTor(detail::OpaqueRef& ref) {
|
||||
/// @private
|
||||
detail::GOpaqueU strip() const {
|
||||
return m_ref;
|
||||
}
|
||||
/// @private
|
||||
static void Ctor(detail::OpaqueRef& ref) {
|
||||
ref.reset<HT>();
|
||||
}
|
||||
private:
|
||||
void putDetails() {
|
||||
m_ref.setConstructFcn(&CTor);
|
||||
m_ref.setConstructFcn(&Ctor);
|
||||
m_ref.specifyType<HT>();
|
||||
m_ref.storeKind<HT>();
|
||||
}
|
||||
|
||||
detail::GOpaqueU m_ref;
|
||||
|
||||
@@ -36,6 +36,7 @@ namespace cv {
|
||||
using GProtoArg = util::variant
|
||||
< GMat
|
||||
, GMatP
|
||||
, GFrame
|
||||
, GScalar
|
||||
, detail::GArrayU // instead of GArray<T>
|
||||
, detail::GOpaqueU // instead of GOpaque<T>
|
||||
@@ -56,12 +57,35 @@ template<class Tag>
|
||||
struct GIOProtoArgs
|
||||
{
|
||||
public:
|
||||
// NB: Used by python wrapper
|
||||
GIOProtoArgs() = default;
|
||||
explicit GIOProtoArgs(const GProtoArgs& args) : m_args(args) {}
|
||||
explicit GIOProtoArgs(GProtoArgs &&args) : m_args(std::move(args)) {}
|
||||
|
||||
GProtoArgs m_args;
|
||||
|
||||
// TODO: Think about the addition operator
|
||||
/**
|
||||
* @brief This operator allows to complement the proto vectors at runtime.
|
||||
*
|
||||
* It's an ordinary overload of addition assignment operator.
|
||||
*
|
||||
* Example of usage:
|
||||
* @snippet dynamic_graph.cpp GIOProtoArgs usage
|
||||
*
|
||||
*/
|
||||
template<typename Tg>
|
||||
friend GIOProtoArgs<Tg>& operator += (GIOProtoArgs<Tg> &lhs, const GIOProtoArgs<Tg> &rhs);
|
||||
};
|
||||
|
||||
template<typename Tg>
|
||||
cv::GIOProtoArgs<Tg>& operator += (cv::GIOProtoArgs<Tg> &lhs, const cv::GIOProtoArgs<Tg> &rhs)
|
||||
{
|
||||
lhs.m_args.reserve(lhs.m_args.size() + rhs.m_args.size());
|
||||
lhs.m_args.insert(lhs.m_args.end(), rhs.m_args.begin(), rhs.m_args.end());
|
||||
return lhs;
|
||||
}
|
||||
|
||||
struct In_Tag{};
|
||||
struct Out_Tag{};
|
||||
|
||||
@@ -111,7 +135,7 @@ GRunArg value_of(const GOrigin &origin);
|
||||
// Transform run-time computation arguments into a collection of metadata
|
||||
// extracted from that arguments
|
||||
GMetaArg GAPI_EXPORTS descr_of(const GRunArg &arg );
|
||||
GMetaArgs GAPI_EXPORTS descr_of(const GRunArgs &args);
|
||||
GMetaArgs GAPI_EXPORTS_W descr_of(const GRunArgs &args);
|
||||
|
||||
// Transform run-time operation result argument into metadata extracted from that argument
|
||||
// Used to compare the metadata, which generated at compile time with the metadata result operation in run time
|
||||
|
||||
@@ -26,10 +26,10 @@ struct GOrigin;
|
||||
* @{
|
||||
*/
|
||||
|
||||
class GAPI_EXPORTS GScalar
|
||||
class GAPI_EXPORTS_W_SIMPLE GScalar
|
||||
{
|
||||
public:
|
||||
GScalar(); // Empty constructor
|
||||
GAPI_WRAP GScalar(); // Empty constructor
|
||||
explicit GScalar(const cv::Scalar& s); // Constant value constructor from cv::Scalar
|
||||
explicit GScalar(cv::Scalar&& s); // Constant value move-constructor from cv::Scalar
|
||||
|
||||
|
||||
@@ -8,15 +8,99 @@
|
||||
#ifndef OPENCV_GAPI_GSTREAMING_COMPILED_HPP
|
||||
#define OPENCV_GAPI_GSTREAMING_COMPILED_HPP
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
#include <opencv2/gapi/own/assert.hpp>
|
||||
#include <opencv2/gapi/util/optional.hpp>
|
||||
#include <opencv2/gapi/garg.hpp>
|
||||
#include <opencv2/gapi/streaming/source.hpp>
|
||||
|
||||
namespace cv {
|
||||
|
||||
template<class T> using optional = cv::util::optional<T>;
|
||||
|
||||
namespace detail {
|
||||
template<typename T> struct wref_spec {
|
||||
using type = T;
|
||||
};
|
||||
template<typename T> struct wref_spec<std::vector<T> > {
|
||||
using type = T;
|
||||
};
|
||||
|
||||
template<typename RefHolder>
|
||||
struct OptRef {
|
||||
struct OptHolder {
|
||||
virtual void mov(RefHolder &h) = 0;
|
||||
virtual void reset() = 0;
|
||||
virtual ~OptHolder() = default;
|
||||
using Ptr = std::shared_ptr<OptHolder>;
|
||||
};
|
||||
template<class T> struct Holder final: OptHolder {
|
||||
std::reference_wrapper<cv::optional<T> > m_opt_ref;
|
||||
|
||||
explicit Holder(cv::optional<T>& opt) : m_opt_ref(std::ref(opt)) {
|
||||
}
|
||||
virtual void mov(RefHolder &h) override {
|
||||
using U = typename wref_spec<T>::type;
|
||||
m_opt_ref.get() = cv::util::make_optional(std::move(h.template wref<U>()));
|
||||
}
|
||||
virtual void reset() override {
|
||||
m_opt_ref.get().reset();
|
||||
}
|
||||
};
|
||||
template<class T>
|
||||
explicit OptRef(cv::optional<T>& t) : m_opt{new Holder<T>(t)} {}
|
||||
void mov(RefHolder &h) { m_opt->mov(h); }
|
||||
void reset() { m_opt->reset();}
|
||||
private:
|
||||
typename OptHolder::Ptr m_opt;
|
||||
};
|
||||
using OptionalVectorRef = OptRef<cv::detail::VectorRef>;
|
||||
using OptionalOpaqueRef = OptRef<cv::detail::OpaqueRef>;
|
||||
} // namespace detail
|
||||
|
||||
// TODO: Keep it in sync with GRunArgP (derive the type automatically?)
|
||||
using GOptRunArgP = util::variant<
|
||||
optional<cv::Mat>*,
|
||||
optional<cv::RMat>*,
|
||||
optional<cv::Scalar>*,
|
||||
cv::detail::OptionalVectorRef,
|
||||
cv::detail::OptionalOpaqueRef
|
||||
>;
|
||||
using GOptRunArgsP = std::vector<GOptRunArgP>;
|
||||
|
||||
namespace detail {
|
||||
|
||||
template<typename T> inline GOptRunArgP wrap_opt_arg(optional<T>& arg) {
|
||||
// By default, T goes to an OpaqueRef. All other types are specialized
|
||||
return GOptRunArgP{OptionalOpaqueRef(arg)};
|
||||
}
|
||||
|
||||
template<typename T> inline GOptRunArgP wrap_opt_arg(optional<std::vector<T> >& arg) {
|
||||
return GOptRunArgP{OptionalVectorRef(arg)};
|
||||
}
|
||||
|
||||
template<> inline GOptRunArgP wrap_opt_arg(optional<cv::Mat> &m) {
|
||||
return GOptRunArgP{&m};
|
||||
}
|
||||
|
||||
template<> inline GOptRunArgP wrap_opt_arg(optional<cv::Scalar> &s) {
|
||||
return GOptRunArgP{&s};
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
|
||||
// Now cv::gout() may produce an empty vector (see "dynamic graphs"), so
|
||||
// there may be a conflict between these two. State here that Opt version
|
||||
// _must_ have at least one input for this overload
|
||||
template<typename T, typename... Ts>
|
||||
inline GOptRunArgsP gout(optional<T>&arg, optional<Ts>&... args)
|
||||
{
|
||||
return GOptRunArgsP{ detail::wrap_opt_arg(arg), detail::wrap_opt_arg(args)... };
|
||||
}
|
||||
|
||||
/**
|
||||
* \addtogroup gapi_main_classes
|
||||
* @{
|
||||
@@ -49,11 +133,11 @@ namespace cv {
|
||||
*
|
||||
* @sa GCompiled
|
||||
*/
|
||||
class GAPI_EXPORTS GStreamingCompiled
|
||||
class GAPI_EXPORTS_W_SIMPLE GStreamingCompiled
|
||||
{
|
||||
public:
|
||||
class GAPI_EXPORTS Priv;
|
||||
GStreamingCompiled();
|
||||
GAPI_WRAP GStreamingCompiled();
|
||||
|
||||
// FIXME: More overloads?
|
||||
/**
|
||||
@@ -96,7 +180,7 @@ public:
|
||||
* @param ins vector of inputs to process.
|
||||
* @sa gin
|
||||
*/
|
||||
void setSource(GRunArgs &&ins);
|
||||
GAPI_WRAP void setSource(GRunArgs &&ins);
|
||||
|
||||
/**
|
||||
* @brief Specify an input video stream for a single-input
|
||||
@@ -109,7 +193,23 @@ public:
|
||||
* @param s a shared pointer to IStreamSource representing the
|
||||
* input video stream.
|
||||
*/
|
||||
void setSource(const gapi::wip::IStreamSource::Ptr& s);
|
||||
GAPI_WRAP void setSource(const gapi::wip::IStreamSource::Ptr& s);
|
||||
|
||||
/**
|
||||
* @brief Constructs and specifies an input video stream for a
|
||||
* single-input computation pipeline with the given parameters.
|
||||
*
|
||||
* Throws if pipeline is already running. Use stop() and then
|
||||
* setSource() to run the graph on a new video stream.
|
||||
*
|
||||
* @overload
|
||||
* @param args arguments used to contruct and initialize a stream
|
||||
* source.
|
||||
*/
|
||||
template<typename T, typename... Args>
|
||||
void setSource(Args&&... args) {
|
||||
setSource(cv::gapi::wip::make_src<T>(std::forward<Args>(args)...));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Start the pipeline execution.
|
||||
@@ -126,7 +226,7 @@ public:
|
||||
* start()/stop()/setSource() may be called on the same object in
|
||||
* multiple threads in your application.
|
||||
*/
|
||||
void start();
|
||||
GAPI_WRAP void start();
|
||||
|
||||
/**
|
||||
* @brief Get the next processed frame from the pipeline.
|
||||
@@ -150,6 +250,47 @@ public:
|
||||
*/
|
||||
bool pull(cv::GRunArgsP &&outs);
|
||||
|
||||
// NB: Used from python
|
||||
GAPI_WRAP std::tuple<bool, cv::GRunArgs> pull();
|
||||
|
||||
/**
|
||||
* @brief Get some next available data from the pipeline.
|
||||
*
|
||||
* This method takes a vector of cv::optional object. An object is
|
||||
* assigned to some value if this value is available (ready) at
|
||||
* the time of the call, and resets the object to empty() if it is
|
||||
* not.
|
||||
*
|
||||
* This is a blocking method which guarantees that some data has
|
||||
* been written to the output vector on return.
|
||||
*
|
||||
* Using this method only makes sense if the graph has
|
||||
* desynchronized parts (see cv::gapi::desync). If there is no
|
||||
* desynchronized parts in the graph, the behavior of this
|
||||
* method is identical to the regular pull() (all data objects are
|
||||
* produced synchronously in the output vector).
|
||||
*
|
||||
* Use gout() to create an output parameter vector.
|
||||
*
|
||||
* Output vectors must have the same number of elements as defined
|
||||
* in the cv::GComputation protocol (at the moment of its
|
||||
* construction). Shapes of elements also must conform to protocol
|
||||
* (e.g. cv::optional<cv::Mat> needs to be passed where cv::GMat
|
||||
* has been declared as output, and so on). Run-time exception is
|
||||
* generated on type mismatch.
|
||||
*
|
||||
* This method writes new data into objects passed via output
|
||||
* vector. If there is no data ready yet, this method blocks. Use
|
||||
* try_pull() if you need a non-blocking version.
|
||||
*
|
||||
* @param outs vector of output parameters to obtain.
|
||||
* @return true if next result has been obtained,
|
||||
* false marks end of the stream.
|
||||
*
|
||||
* @sa cv::gapi::desync
|
||||
*/
|
||||
bool pull(cv::GOptRunArgsP &&outs);
|
||||
|
||||
/**
|
||||
* @brief Try to get the next processed frame from the pipeline.
|
||||
*
|
||||
@@ -172,7 +313,7 @@ public:
|
||||
*
|
||||
* Throws if the pipeline is not running.
|
||||
*/
|
||||
void stop();
|
||||
GAPI_WRAP void stop();
|
||||
|
||||
/**
|
||||
* @brief Test if the pipeline is running.
|
||||
@@ -184,7 +325,7 @@ public:
|
||||
*
|
||||
* @return true if the current stream is not over yet.
|
||||
*/
|
||||
bool running() const;
|
||||
GAPI_WRAP bool running() const;
|
||||
|
||||
/// @private
|
||||
Priv& priv();
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GTYPE_TRAITS_HPP
|
||||
@@ -15,9 +15,10 @@
|
||||
#include <opencv2/gapi/gscalar.hpp>
|
||||
#include <opencv2/gapi/garray.hpp>
|
||||
#include <opencv2/gapi/gopaque.hpp>
|
||||
#include <opencv2/gapi/gframe.hpp>
|
||||
#include <opencv2/gapi/streaming/source.hpp>
|
||||
#include <opencv2/gapi/media.hpp>
|
||||
#include <opencv2/gapi/gcommon.hpp>
|
||||
#include <opencv2/gapi/own/convert.hpp>
|
||||
|
||||
namespace cv
|
||||
{
|
||||
@@ -36,6 +37,7 @@ namespace detail
|
||||
GOBJREF, // <internal> reference to object
|
||||
GMAT, // a cv::GMat
|
||||
GMATP, // a cv::GMatP
|
||||
GFRAME, // a cv::GFrame
|
||||
GSCALAR, // a cv::GScalar
|
||||
GARRAY, // a cv::GArrayU (note - exactly GArrayU, not GArray<T>!)
|
||||
GOPAQUE, // a cv::GOpaqueU (note - exactly GOpaqueU, not GOpaque<T>!)
|
||||
@@ -49,26 +51,37 @@ namespace detail
|
||||
template<typename T> struct GTypeTraits
|
||||
{
|
||||
static constexpr const ArgKind kind = ArgKind::OPAQUE_VAL;
|
||||
static constexpr const OpaqueKind op_kind = OpaqueKind::CV_UNKNOWN;
|
||||
};
|
||||
template<> struct GTypeTraits<cv::GMat>
|
||||
{
|
||||
static constexpr const ArgKind kind = ArgKind::GMAT;
|
||||
static constexpr const GShape shape = GShape::GMAT;
|
||||
static constexpr const OpaqueKind op_kind = OpaqueKind::CV_UNKNOWN;
|
||||
};
|
||||
template<> struct GTypeTraits<cv::GMatP>
|
||||
{
|
||||
static constexpr const ArgKind kind = ArgKind::GMATP;
|
||||
static constexpr const GShape shape = GShape::GMAT;
|
||||
static constexpr const OpaqueKind op_kind = OpaqueKind::CV_UNKNOWN;
|
||||
};
|
||||
template<> struct GTypeTraits<cv::GFrame>
|
||||
{
|
||||
static constexpr const ArgKind kind = ArgKind::GFRAME;
|
||||
static constexpr const GShape shape = GShape::GFRAME;
|
||||
static constexpr const OpaqueKind op_kind = OpaqueKind::CV_UNKNOWN;
|
||||
};
|
||||
template<> struct GTypeTraits<cv::GScalar>
|
||||
{
|
||||
static constexpr const ArgKind kind = ArgKind::GSCALAR;
|
||||
static constexpr const GShape shape = GShape::GSCALAR;
|
||||
static constexpr const OpaqueKind op_kind = OpaqueKind::CV_UNKNOWN;
|
||||
};
|
||||
template<class T> struct GTypeTraits<cv::GArray<T> >
|
||||
{
|
||||
static constexpr const ArgKind kind = ArgKind::GARRAY;
|
||||
static constexpr const GShape shape = GShape::GARRAY;
|
||||
static constexpr const OpaqueKind op_kind = GOpaqueTraits<T>::kind;
|
||||
using host_type = std::vector<T>;
|
||||
using strip_type = cv::detail::VectorRef;
|
||||
static cv::detail::GArrayU wrap_value(const cv::GArray<T> &t) { return t.strip();}
|
||||
@@ -79,6 +92,7 @@ namespace detail
|
||||
{
|
||||
static constexpr const ArgKind kind = ArgKind::GOPAQUE;
|
||||
static constexpr const GShape shape = GShape::GOPAQUE;
|
||||
static constexpr const OpaqueKind op_kind = GOpaqueTraits<T>::kind;
|
||||
using host_type = T;
|
||||
using strip_type = cv::detail::OpaqueRef;
|
||||
static cv::detail::GOpaqueU wrap_value(const cv::GOpaque<T> &t) { return t.strip();}
|
||||
@@ -105,13 +119,14 @@ namespace detail
|
||||
// and GMat behavior is correct for GMatP)
|
||||
template<typename T> struct GTypeOf;
|
||||
#if !defined(GAPI_STANDALONE)
|
||||
template<> struct GTypeOf<cv::Mat> { using type = cv::GMat; };
|
||||
template<> struct GTypeOf<cv::UMat> { using type = cv::GMat; };
|
||||
#endif // !defined(GAPI_STANDALONE)
|
||||
template<> struct GTypeOf<cv::gapi::own::Mat> { using type = cv::GMat; };
|
||||
template<> struct GTypeOf<cv::Mat> { using type = cv::GMat; };
|
||||
template<> struct GTypeOf<cv::RMat> { using type = cv::GMat; };
|
||||
template<> struct GTypeOf<cv::Scalar> { using type = cv::GScalar; };
|
||||
template<typename U> struct GTypeOf<std::vector<U> > { using type = cv::GArray<U>; };
|
||||
template<typename U> struct GTypeOf { using type = cv::GOpaque<U>;};
|
||||
template<> struct GTypeOf<cv::MediaFrame> { using type = cv::GFrame; };
|
||||
// FIXME: This is not quite correct since IStreamSource may produce not only Mat but also Scalar
|
||||
// and vector data. TODO: Extend the type dispatching on these types too.
|
||||
template<> struct GTypeOf<cv::gapi::wip::IStreamSource::Ptr> { using type = cv::GMat;};
|
||||
@@ -176,6 +191,29 @@ namespace detail
|
||||
|
||||
template<typename T> using wrap_gapi_helper = WrapValue<typename std::decay<T>::type>;
|
||||
template<typename T> using wrap_host_helper = WrapValue<typename std::decay<g_type_of_t<T> >::type>;
|
||||
|
||||
// Union type for various user-defined type constructors (GArray<T>,
|
||||
// GOpaque<T>, etc)
|
||||
//
|
||||
// TODO: Replace construct-only API with a more generic one (probably
|
||||
// with bits of introspection)
|
||||
//
|
||||
// Not required for non-user-defined types (GMat, GScalar, etc)
|
||||
using HostCtor = util::variant
|
||||
< util::monostate
|
||||
, detail::ConstructVec
|
||||
, detail::ConstructOpaque
|
||||
>;
|
||||
|
||||
template<typename T> struct GObtainCtor {
|
||||
static HostCtor get() { return HostCtor{}; }
|
||||
};
|
||||
template<typename T> struct GObtainCtor<GArray<T> > {
|
||||
static HostCtor get() { return HostCtor{ConstructVec{&GArray<T>::VCtor}}; };
|
||||
};
|
||||
template<typename T> struct GObtainCtor<GOpaque<T> > {
|
||||
static HostCtor get() { return HostCtor{ConstructOpaque{&GOpaque<T>::Ctor}}; };
|
||||
};
|
||||
} // namespace detail
|
||||
} // namespace cv
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ namespace detail
|
||||
template<> struct ProtoToParam<cv::GMat> { using type = cv::Mat; };
|
||||
template<> struct ProtoToParam<cv::GScalar> { using type = cv::Scalar; };
|
||||
template<typename U> struct ProtoToParam<cv::GArray<U> > { using type = std::vector<U>; };
|
||||
template<> struct ProtoToParam<cv::GArray<cv::GMat>> { using type = std::vector<cv::Mat>; };
|
||||
template<typename U> struct ProtoToParam<cv::GOpaque<U> > { using type = U; };
|
||||
template<typename T> using ProtoToParamT = typename ProtoToParam<T>::type;
|
||||
|
||||
@@ -132,12 +133,20 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
void apply(detail::ProtoToParamT<Args>... inArgs,
|
||||
detail::ProtoToParamT<R> &outArg,
|
||||
GCompileArgs &&args)
|
||||
{
|
||||
m_comp.apply(cv::gin(inArgs...), cv::gout(outArg), std::move(args));
|
||||
}
|
||||
|
||||
void apply(detail::ProtoToParamT<Args>... inArgs,
|
||||
detail::ProtoToParamT<R> &outArg)
|
||||
{
|
||||
m_comp.apply(cv::gin(inArgs...), cv::gout(outArg));
|
||||
apply(inArgs..., outArg, GCompileArgs());
|
||||
}
|
||||
|
||||
|
||||
GCompiledT compile(detail::ProtoToMetaT<Args>... inDescs)
|
||||
{
|
||||
GMetaArgs inMetas = { GMetaArg(inDescs)... };
|
||||
@@ -205,12 +214,20 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
void apply(detail::ProtoToParamT<Args>... inArgs,
|
||||
detail::ProtoToParamT<R>&... outArgs,
|
||||
GCompileArgs &&args)
|
||||
{
|
||||
m_comp.apply(cv::gin(inArgs...), cv::gout(outArgs...), std::move(args));
|
||||
}
|
||||
|
||||
void apply(detail::ProtoToParamT<Args>... inArgs,
|
||||
detail::ProtoToParamT<R>&... outArgs)
|
||||
{
|
||||
m_comp.apply(cv::gin(inArgs...), cv::gout(outArgs...));
|
||||
apply(inArgs..., outArgs..., GCompileArgs());
|
||||
}
|
||||
|
||||
|
||||
GCompiledT compile(detail::ProtoToMetaT<Args>... inDescs)
|
||||
{
|
||||
GMetaArgs inMetas = { GMetaArg(inDescs)... };
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_IMGPROC_HPP
|
||||
@@ -21,14 +21,45 @@
|
||||
@{
|
||||
@defgroup gapi_filters Graph API: Image filters
|
||||
@defgroup gapi_colorconvert Graph API: Converting image from one color space to another
|
||||
@defgroup gapi_feature Graph API: Image Feature Detection
|
||||
@defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors
|
||||
@}
|
||||
*/
|
||||
|
||||
namespace {
|
||||
void validateFindingContoursMeta(const int depth, const int chan, const int mode)
|
||||
{
|
||||
GAPI_Assert(chan == 1);
|
||||
switch (mode)
|
||||
{
|
||||
case cv::RETR_CCOMP:
|
||||
GAPI_Assert(depth == CV_8U || depth == CV_32S);
|
||||
break;
|
||||
case cv::RETR_FLOODFILL:
|
||||
GAPI_Assert(depth == CV_32S);
|
||||
break;
|
||||
default:
|
||||
GAPI_Assert(depth == CV_8U);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Checks if the passed mat is a set of n-dimentional points of the given depth
|
||||
bool isPointsVector(const int chan, const cv::Size &size, const int depth,
|
||||
const int n, const int ddepth = -1)
|
||||
{
|
||||
return (ddepth == depth || ddepth < 0) &&
|
||||
((chan == n && (size.height == 1 || size.width == 1)) ||
|
||||
(chan == 1 && size.width == n));
|
||||
}
|
||||
} // anonymous namespace
|
||||
|
||||
namespace cv { namespace gapi {
|
||||
|
||||
namespace imgproc {
|
||||
using GMat2 = std::tuple<GMat,GMat>;
|
||||
using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
|
||||
using GFindContoursOutput = std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>;
|
||||
|
||||
G_TYPED_KERNEL(GFilter2D, <GMat(GMat,int,Mat,Point,Scalar,int,Scalar)>,"org.opencv.imgproc.filters.filter2D") {
|
||||
static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) {
|
||||
@@ -78,6 +109,14 @@ namespace imgproc {
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GMorphologyEx, <GMat(GMat,MorphTypes,Mat,Point,int,BorderTypes,Scalar)>,
|
||||
"org.opencv.imgproc.filters.morphologyEx") {
|
||||
static GMatDesc outMeta(const GMatDesc &in, MorphTypes, Mat, Point, int,
|
||||
BorderTypes, Scalar) {
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GSobel, <GMat(GMat,int,int,int,int,double,double,int,Scalar)>, "org.opencv.imgproc.filters.sobel") {
|
||||
static GMatDesc outMeta(GMatDesc in, int ddepth, int, int, int, double, double, int, Scalar) {
|
||||
return in.withDepth(ddepth);
|
||||
@@ -90,18 +129,181 @@ namespace imgproc {
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GLaplacian, <GMat(GMat,int, int, double, double, int)>,
|
||||
"org.opencv.imgproc.filters.laplacian") {
|
||||
static GMatDesc outMeta(GMatDesc in, int ddepth, int, double, double, int) {
|
||||
return in.withDepth(ddepth);
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GBilateralFilter, <GMat(GMat,int, double, double, int)>,
|
||||
"org.opencv.imgproc.filters.bilateralfilter") {
|
||||
static GMatDesc outMeta(GMatDesc in, int, double, double, int) {
|
||||
return in;
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GEqHist, <GMat(GMat)>, "org.opencv.imgproc.equalizeHist"){
|
||||
static GMatDesc outMeta(GMatDesc in) {
|
||||
return in.withType(CV_8U, 1);
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.canny"){
|
||||
G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.feature.canny"){
|
||||
static GMatDesc outMeta(GMatDesc in, double, double, int, bool) {
|
||||
return in.withType(CV_8U, 1);
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GGoodFeatures,
|
||||
<cv::GArray<cv::Point2f>(GMat,int,double,double,Mat,int,bool,double)>,
|
||||
"org.opencv.imgproc.feature.goodFeaturesToTrack") {
|
||||
static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) {
|
||||
return empty_array_desc();
|
||||
}
|
||||
};
|
||||
|
||||
using RetrMode = RetrievalModes;
|
||||
using ContMethod = ContourApproximationModes;
|
||||
G_TYPED_KERNEL(GFindContours, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
|
||||
"org.opencv.imgproc.shape.findContours")
|
||||
{
|
||||
static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
|
||||
{
|
||||
validateFindingContoursMeta(in.depth, in.chan, mode);
|
||||
return empty_array_desc();
|
||||
}
|
||||
};
|
||||
|
||||
// FIXME oc: make default value offset = Point()
|
||||
G_TYPED_KERNEL(GFindContoursNoOffset, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod)>,
|
||||
"org.opencv.imgproc.shape.findContoursNoOffset")
|
||||
{
|
||||
static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod)
|
||||
{
|
||||
validateFindingContoursMeta(in.depth, in.chan, mode);
|
||||
return empty_array_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFindContoursH,<GFindContoursOutput(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
|
||||
"org.opencv.imgproc.shape.findContoursH")
|
||||
{
|
||||
static std::tuple<GArrayDesc,GArrayDesc>
|
||||
outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
|
||||
{
|
||||
validateFindingContoursMeta(in.depth, in.chan, mode);
|
||||
return std::make_tuple(empty_array_desc(), empty_array_desc());
|
||||
}
|
||||
};
|
||||
|
||||
// FIXME oc: make default value offset = Point()
|
||||
G_TYPED_KERNEL(GFindContoursHNoOffset,<GFindContoursOutput(GMat,RetrMode,ContMethod)>,
|
||||
"org.opencv.imgproc.shape.findContoursHNoOffset")
|
||||
{
|
||||
static std::tuple<GArrayDesc,GArrayDesc>
|
||||
outMeta(GMatDesc in, RetrMode mode, ContMethod)
|
||||
{
|
||||
validateFindingContoursMeta(in.depth, in.chan, mode);
|
||||
return std::make_tuple(empty_array_desc(), empty_array_desc());
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GBoundingRectMat, <GOpaque<Rect>(GMat)>,
|
||||
"org.opencv.imgproc.shape.boundingRectMat") {
|
||||
static GOpaqueDesc outMeta(GMatDesc in) {
|
||||
GAPI_Assert((in.depth == CV_8U && in.chan == 1) ||
|
||||
(isPointsVector(in.chan, in.size, in.depth, 2, CV_32S) ||
|
||||
isPointsVector(in.chan, in.size, in.depth, 2, CV_32F)));
|
||||
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GBoundingRectVector32S, <GOpaque<Rect>(GArray<Point2i>)>,
|
||||
"org.opencv.imgproc.shape.boundingRectVector32S") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GBoundingRectVector32F, <GOpaque<Rect>(GArray<Point2f>)>,
|
||||
"org.opencv.imgproc.shape.boundingRectVector32F") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine2DMat, <GOpaque<Vec4f>(GMat,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine2DMat") {
|
||||
static GOpaqueDesc outMeta(GMatDesc in,DistanceTypes,double,double,double) {
|
||||
GAPI_Assert(isPointsVector(in.chan, in.size, in.depth, 2, -1));
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine2DVector32S,
|
||||
<GOpaque<Vec4f>(GArray<Point2i>,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine2DVector32S") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine2DVector32F,
|
||||
<GOpaque<Vec4f>(GArray<Point2f>,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine2DVector32F") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine2DVector64F,
|
||||
<GOpaque<Vec4f>(GArray<Point2d>,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine2DVector64F") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine3DMat, <GOpaque<Vec6f>(GMat,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine3DMat") {
|
||||
static GOpaqueDesc outMeta(GMatDesc in,int,double,double,double) {
|
||||
GAPI_Assert(isPointsVector(in.chan, in.size, in.depth, 3, -1));
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine3DVector32S,
|
||||
<GOpaque<Vec6f>(GArray<Point3i>,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine3DVector32S") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine3DVector32F,
|
||||
<GOpaque<Vec6f>(GArray<Point3f>,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine3DVector32F") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GFitLine3DVector64F,
|
||||
<GOpaque<Vec6f>(GArray<Point3d>,DistanceTypes,double,double,double)>,
|
||||
"org.opencv.imgproc.shape.fitLine3DVector64F") {
|
||||
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
|
||||
return empty_gopaque_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GBGR2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2rgb") {
|
||||
static GMatDesc outMeta(GMatDesc in) {
|
||||
return in; // type still remains CV_8UC3;
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GRGB2YUV, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2yuv") {
|
||||
static GMatDesc outMeta(GMatDesc in) {
|
||||
return in; // type still remains CV_8UC3;
|
||||
@@ -114,6 +316,42 @@ namespace imgproc {
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GBGR2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2i420") {
|
||||
static GMatDesc outMeta(GMatDesc in) {
|
||||
GAPI_Assert(in.depth == CV_8U);
|
||||
GAPI_Assert(in.chan == 3);
|
||||
GAPI_Assert(in.size.height % 2 == 0);
|
||||
return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GRGB2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2i420") {
|
||||
static GMatDesc outMeta(GMatDesc in) {
|
||||
GAPI_Assert(in.depth == CV_8U);
|
||||
GAPI_Assert(in.chan == 3);
|
||||
GAPI_Assert(in.size.height % 2 == 0);
|
||||
return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GI4202BGR, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202bgr") {
|
||||
static GMatDesc outMeta(GMatDesc in) {
|
||||
GAPI_Assert(in.depth == CV_8U);
|
||||
GAPI_Assert(in.chan == 1);
|
||||
GAPI_Assert(in.size.height % 3 == 0);
|
||||
return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GI4202RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202rgb") {
|
||||
static GMatDesc outMeta(GMatDesc in) {
|
||||
GAPI_Assert(in.depth == CV_8U);
|
||||
GAPI_Assert(in.chan == 1);
|
||||
GAPI_Assert(in.size.height % 3 == 0);
|
||||
return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GNV12toRGB, <GMat(GMat, GMat)>, "org.opencv.imgproc.colorconvert.nv12torgb") {
|
||||
static GMatDesc outMeta(GMatDesc in_y, GMatDesc in_uv) {
|
||||
GAPI_Assert(in_y.chan == 1);
|
||||
@@ -208,7 +446,7 @@ namespace imgproc {
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12torgbp") {
|
||||
G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12torgbp") {
|
||||
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
|
||||
GAPI_Assert(inY.depth == CV_8U);
|
||||
GAPI_Assert(inUV.depth == CV_8U);
|
||||
@@ -222,7 +460,7 @@ namespace imgproc {
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12togray") {
|
||||
G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12togray") {
|
||||
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
|
||||
GAPI_Assert(inY.depth == CV_8U);
|
||||
GAPI_Assert(inUV.depth == CV_8U);
|
||||
@@ -237,7 +475,7 @@ namespace imgproc {
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12tobgrp") {
|
||||
G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12tobgrp") {
|
||||
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
|
||||
GAPI_Assert(inY.depth == CV_8U);
|
||||
GAPI_Assert(inUV.depth == CV_8U);
|
||||
@@ -251,8 +489,7 @@ namespace imgproc {
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
} //namespace imgproc
|
||||
|
||||
//! @addtogroup gapi_filters
|
||||
//! @{
|
||||
@@ -298,7 +535,7 @@ according to the specified border mode.
|
||||
|
||||
The function does actually compute correlation, not the convolution:
|
||||
|
||||
\f[\texttt{dst} (x,y) = \sum _{ \stackrel{0\leq x' < \texttt{kernel.cols},}{0\leq y' < \texttt{kernel.rows}} } \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\f]
|
||||
\f[\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\f]
|
||||
|
||||
That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
|
||||
the kernel using flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
|
||||
@@ -335,7 +572,7 @@ The function smooths an image using the kernel:
|
||||
|
||||
where
|
||||
|
||||
\f[\alpha = \fork{\frac{1}{\texttt{ksize.width*ksize.height}}}{when \texttt{normalize=true}}{1}{otherwise}\f]
|
||||
\f[\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise} \end{cases}\f]
|
||||
|
||||
Unnormalized box filter is useful for computing various integral characteristics over each pixel
|
||||
neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
|
||||
@@ -367,8 +604,8 @@ The function smooths an image using the kernel:
|
||||
|
||||
\f[\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}\f]
|
||||
|
||||
The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(),
|
||||
anchor, true, borderType)`.
|
||||
The call `blur(src, ksize, anchor, borderType)` is equivalent to `boxFilter(src, src.type(), ksize, anchor,
|
||||
true, borderType)`.
|
||||
|
||||
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
|
||||
Output image must have the same type, size, and number of channels as the input image.
|
||||
@@ -434,7 +671,7 @@ The median filter uses cv::BORDER_REPLICATE internally to cope with border pixel
|
||||
@param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
|
||||
@sa boxFilter, gaussianBlur
|
||||
*/
|
||||
GAPI_EXPORTS GMat medianBlur(const GMat& src, int ksize);
|
||||
GAPI_EXPORTS_W GMat medianBlur(const GMat& src, int ksize);
|
||||
|
||||
/** @brief Erodes an image by using a specific structuring element.
|
||||
|
||||
@@ -458,7 +695,7 @@ anchor is at the element center.
|
||||
@param iterations number of times erosion is applied.
|
||||
@param borderType pixel extrapolation method, see cv::BorderTypes
|
||||
@param borderValue border value in case of a constant border
|
||||
@sa dilate
|
||||
@sa dilate, morphologyEx
|
||||
*/
|
||||
GAPI_EXPORTS GMat erode(const GMat& src, const Mat& kernel, const Point& anchor = Point(-1,-1), int iterations = 1,
|
||||
int borderType = BORDER_CONSTANT,
|
||||
@@ -533,6 +770,37 @@ GAPI_EXPORTS GMat dilate3x3(const GMat& src, int iterations = 1,
|
||||
int borderType = BORDER_CONSTANT,
|
||||
const Scalar& borderValue = morphologyDefaultBorderValue());
|
||||
|
||||
/** @brief Performs advanced morphological transformations.
|
||||
|
||||
The function can perform advanced morphological transformations using an erosion and dilation as
|
||||
basic operations.
|
||||
|
||||
Any of the operations can be done in-place. In case of multi-channel images, each channel is
|
||||
processed independently.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.filters.morphologyEx"
|
||||
|
||||
@param src Input image.
|
||||
@param op Type of a morphological operation, see #MorphTypes
|
||||
@param kernel Structuring element. It can be created using #getStructuringElement.
|
||||
@param anchor Anchor position within the element. Both negative values mean that the anchor is at
|
||||
the kernel center.
|
||||
@param iterations Number of times erosion and dilation are applied.
|
||||
@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
|
||||
@param borderValue Border value in case of a constant border. The default value has a special
|
||||
meaning.
|
||||
@sa dilate, erode, getStructuringElement
|
||||
@note The number of iterations is the number of times erosion or dilatation operation will be
|
||||
applied. For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to
|
||||
apply successively: erode -> erode -> dilate -> dilate
|
||||
(and not erode -> dilate -> erode -> dilate).
|
||||
*/
|
||||
GAPI_EXPORTS GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel,
|
||||
const Point &anchor = Point(-1,-1),
|
||||
const int iterations = 1,
|
||||
const BorderTypes borderType = BORDER_CONSTANT,
|
||||
const Scalar &borderValue = morphologyDefaultBorderValue());
|
||||
|
||||
/** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
|
||||
|
||||
In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to
|
||||
@@ -636,6 +904,72 @@ GAPI_EXPORTS std::tuple<GMat, GMat> SobelXY(const GMat& src, int ddepth, int ord
|
||||
int borderType = BORDER_DEFAULT,
|
||||
const Scalar& borderValue = Scalar(0));
|
||||
|
||||
/** @brief Calculates the Laplacian of an image.
|
||||
|
||||
The function calculates the Laplacian of the source image by adding up the second x and y
|
||||
derivatives calculated using the Sobel operator:
|
||||
|
||||
\f[\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\f]
|
||||
|
||||
This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
|
||||
with the following \f$3 \times 3\f$ aperture:
|
||||
|
||||
\f[\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\f]
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.filters.laplacian"
|
||||
|
||||
@param src Source image.
|
||||
@param ddepth Desired depth of the destination image.
|
||||
@param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
|
||||
details. The size must be positive and odd.
|
||||
@param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
|
||||
applied. See #getDerivKernels for details.
|
||||
@param delta Optional delta value that is added to the results prior to storing them in dst .
|
||||
@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
|
||||
@return Destination image of the same size and the same number of channels as src.
|
||||
@sa Sobel, Scharr
|
||||
*/
|
||||
GAPI_EXPORTS GMat Laplacian(const GMat& src, int ddepth, int ksize = 1,
|
||||
double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT);
|
||||
|
||||
/** @brief Applies the bilateral filter to an image.
|
||||
|
||||
The function applies bilateral filtering to the input image, as described in
|
||||
http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
|
||||
bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
|
||||
very slow compared to most filters.
|
||||
|
||||
_Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\<
|
||||
10), the filter will not have much effect, whereas if they are large (\> 150), they will have a very
|
||||
strong effect, making the image look "cartoonish".
|
||||
|
||||
_Filter size_: Large filters (d \> 5) are very slow, so it is recommended to use d=5 for real-time
|
||||
applications, and perhaps d=9 for offline applications that need heavy noise filtering.
|
||||
|
||||
This filter does not work inplace.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.filters.bilateralfilter"
|
||||
|
||||
@param src Source 8-bit or floating-point, 1-channel or 3-channel image.
|
||||
@param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
|
||||
it is computed from sigmaSpace.
|
||||
@param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
|
||||
farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
|
||||
in larger areas of semi-equal color.
|
||||
@param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
|
||||
farther pixels will influence each other as long as their colors are close enough (see sigmaColor
|
||||
). When d\>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
|
||||
proportional to sigmaSpace.
|
||||
@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
|
||||
@return Destination image of the same size and type as src.
|
||||
*/
|
||||
GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace,
|
||||
int borderType = BORDER_DEFAULT);
|
||||
|
||||
//! @} gapi_filters
|
||||
|
||||
//! @addtogroup gapi_feature
|
||||
//! @{
|
||||
/** @brief Finds edges in an image using the Canny algorithm.
|
||||
|
||||
The function finds edges in the input image and marks them in the output map edges using the
|
||||
@@ -643,7 +977,7 @@ Canny algorithm. The smallest value between threshold1 and threshold2 is used fo
|
||||
largest value is used to find initial segments of strong edges. See
|
||||
<http://en.wikipedia.org/wiki/Canny_edge_detector>
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.filters.canny"
|
||||
@note Function textual ID is "org.opencv.imgproc.feature.canny"
|
||||
|
||||
@param image 8-bit input image.
|
||||
@param threshold1 first threshold for the hysteresis procedure.
|
||||
@@ -657,8 +991,63 @@ L2gradient=false ).
|
||||
GAPI_EXPORTS GMat Canny(const GMat& image, double threshold1, double threshold2,
|
||||
int apertureSize = 3, bool L2gradient = false);
|
||||
|
||||
/** @brief Determines strong corners on an image.
|
||||
|
||||
The function finds the most prominent corners in the image or in the specified image region, as
|
||||
described in @cite Shi94
|
||||
|
||||
- Function calculates the corner quality measure at every source image pixel using the
|
||||
#cornerMinEigenVal or #cornerHarris .
|
||||
- Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
|
||||
retained).
|
||||
- The corners with the minimal eigenvalue less than
|
||||
\f$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\f$ are rejected.
|
||||
- The remaining corners are sorted by the quality measure in the descending order.
|
||||
- Function throws away each corner for which there is a stronger corner at a distance less than
|
||||
maxDistance.
|
||||
|
||||
The function can be used to initialize a point-based tracker of an object.
|
||||
|
||||
@note If the function is called with different values A and B of the parameter qualityLevel , and
|
||||
A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
|
||||
with qualityLevel=B .
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack"
|
||||
|
||||
@param image Input 8-bit or floating-point 32-bit, single-channel image.
|
||||
@param maxCorners Maximum number of corners to return. If there are more corners than are found,
|
||||
the strongest of them is returned. `maxCorners <= 0` implies that no limit on the maximum is set
|
||||
and all detected corners are returned.
|
||||
@param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
|
||||
parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
|
||||
(see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
|
||||
quality measure less than the product are rejected. For example, if the best corner has the
|
||||
quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
|
||||
less than 15 are rejected.
|
||||
@param minDistance Minimum possible Euclidean distance between the returned corners.
|
||||
@param mask Optional region of interest. If the image is not empty (it needs to have the type
|
||||
CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
|
||||
@param blockSize Size of an average block for computing a derivative covariation matrix over each
|
||||
pixel neighborhood. See cornerEigenValsAndVecs .
|
||||
@param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
|
||||
or #cornerMinEigenVal.
|
||||
@param k Free parameter of the Harris detector.
|
||||
|
||||
@return vector of detected corners.
|
||||
*/
|
||||
GAPI_EXPORTS GArray<Point2f> goodFeaturesToTrack(const GMat &image,
|
||||
int maxCorners,
|
||||
double qualityLevel,
|
||||
double minDistance,
|
||||
const Mat &mask = Mat(),
|
||||
int blockSize = 3,
|
||||
bool useHarrisDetector = false,
|
||||
double k = 0.04);
|
||||
|
||||
/** @brief Equalizes the histogram of a grayscale image.
|
||||
|
||||
//! @} gapi_feature
|
||||
|
||||
The function equalizes the histogram of the input image using the following algorithm:
|
||||
|
||||
- Calculate the histogram \f$H\f$ for src .
|
||||
@@ -676,10 +1065,281 @@ The algorithm normalizes the brightness and increases the contrast of the image.
|
||||
*/
|
||||
GAPI_EXPORTS GMat equalizeHist(const GMat& src);
|
||||
|
||||
//! @} gapi_filters
|
||||
//! @addtogroup gapi_shape
|
||||
//! @{
|
||||
/** @brief Finds contours in a binary image.
|
||||
|
||||
The function retrieves contours from the binary image using the algorithm @cite Suzuki85 .
|
||||
The contours are a useful tool for shape analysis and object detection and recognition.
|
||||
See squares.cpp in the OpenCV sample directory.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.findContours"
|
||||
|
||||
@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
|
||||
pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
|
||||
#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
|
||||
If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
|
||||
image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL then @ref CV_32SC1 is supported only.
|
||||
@param mode Contour retrieval mode, see #RetrievalModes
|
||||
@param method Contour approximation method, see #ContourApproximationModes
|
||||
@param offset Optional offset by which every contour point is shifted. This is useful if the
|
||||
contours are extracted from the image ROI and then they should be analyzed in the whole image
|
||||
context.
|
||||
|
||||
@return GArray of detected contours. Each contour is stored as a GArray of points.
|
||||
*/
|
||||
GAPI_EXPORTS GArray<GArray<Point>>
|
||||
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
|
||||
const GOpaque<Point> &offset);
|
||||
|
||||
// FIXME oc: make default value offset = Point()
|
||||
/** @overload
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset"
|
||||
*/
|
||||
GAPI_EXPORTS GArray<GArray<Point>>
|
||||
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
|
||||
|
||||
/** @brief Finds contours and their hierarchy in a binary image.
|
||||
|
||||
The function retrieves contours from the binary image using the algorithm @cite Suzuki85
|
||||
and calculates their hierarchy.
|
||||
The contours are a useful tool for shape analysis and object detection and recognition.
|
||||
See squares.cpp in the OpenCV sample directory.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.findContoursH"
|
||||
|
||||
@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
|
||||
pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
|
||||
#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
|
||||
If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
|
||||
image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL -- @ref CV_32SC1 supports only.
|
||||
@param mode Contour retrieval mode, see #RetrievalModes
|
||||
@param method Contour approximation method, see #ContourApproximationModes
|
||||
@param offset Optional offset by which every contour point is shifted. This is useful if the
|
||||
contours are extracted from the image ROI and then they should be analyzed in the whole image
|
||||
context.
|
||||
|
||||
@return GArray of detected contours. Each contour is stored as a GArray of points.
|
||||
@return Optional output GArray of cv::Vec4i, containing information about the image topology.
|
||||
It has as many elements as the number of contours. For each i-th contour contours[i], the elements
|
||||
hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based
|
||||
indices in contours of the next and previous contours at the same hierarchical level, the first
|
||||
child contour and the parent contour, respectively. If for the contour i there are no next,
|
||||
previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
|
||||
*/
|
||||
GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
|
||||
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
|
||||
const GOpaque<Point> &offset);
|
||||
|
||||
// FIXME oc: make default value offset = Point()
|
||||
/** @overload
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset"
|
||||
*/
|
||||
GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
|
||||
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
|
||||
|
||||
/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels
|
||||
of gray-scale image.
|
||||
|
||||
The function calculates and returns the minimal up-right bounding rectangle for the specified
|
||||
point set or non-zero pixels of gray-scale image.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectMat"
|
||||
|
||||
@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F
|
||||
2D points stored in Mat.
|
||||
|
||||
@note In case of a 2D points' set given, Mat should be 2-dimensional, have a single row or column
|
||||
if there are 2 channels, or have 2 columns if there is a single channel. Mat should have either
|
||||
@ref CV_32S or @ref CV_32F depth
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GMat& src);
|
||||
|
||||
/** @overload
|
||||
|
||||
Calculates the up-right bounding rectangle of a point set.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S"
|
||||
|
||||
@param src Input 2D point set, stored in std::vector<cv::Point2i>.
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2i>& src);
|
||||
|
||||
/** @overload
|
||||
|
||||
Calculates the up-right bounding rectangle of a point set.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F"
|
||||
|
||||
@param src Input 2D point set, stored in std::vector<cv::Point2f>.
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2f>& src);
|
||||
|
||||
/** @brief Fits a line to a 2D point set.
|
||||
|
||||
The function fits a line to a 2D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
|
||||
\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance
|
||||
function, one of the following:
|
||||
- DIST_L2
|
||||
\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f]
|
||||
- DIST_L1
|
||||
\f[\rho (r) = r\f]
|
||||
- DIST_L12
|
||||
\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
|
||||
- DIST_FAIR
|
||||
\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f]
|
||||
- DIST_WELSCH
|
||||
\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f]
|
||||
- DIST_HUBER
|
||||
\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
|
||||
|
||||
The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
|
||||
that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
|
||||
weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DMat"
|
||||
|
||||
@param src Input set of 2D points stored in one of possible containers: Mat,
|
||||
std::vector<cv::Point2i>, std::vector<cv::Point2f>, std::vector<cv::Point2d>.
|
||||
|
||||
@note In case of an N-dimentional points' set given, Mat should be 2-dimensional, have a single row
|
||||
or column if there are N channels, or have N columns if there is a single channel.
|
||||
|
||||
@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
|
||||
and @ref DIST_C are not suppored.
|
||||
@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
|
||||
is chosen.
|
||||
@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
|
||||
line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
|
||||
@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
|
||||
If it is 0, a default value is chosen.
|
||||
|
||||
@return Output line parameters: a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0),
|
||||
where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line.
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GMat& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
/** @overload
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32S"
|
||||
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2i>& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
/** @overload
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32F"
|
||||
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2f>& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
/** @overload
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector64F"
|
||||
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2d>& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
/** @brief Fits a line to a 3D point set.
|
||||
|
||||
The function fits a line to a 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
|
||||
\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance
|
||||
function, one of the following:
|
||||
- DIST_L2
|
||||
\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f]
|
||||
- DIST_L1
|
||||
\f[\rho (r) = r\f]
|
||||
- DIST_L12
|
||||
\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
|
||||
- DIST_FAIR
|
||||
\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f]
|
||||
- DIST_WELSCH
|
||||
\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f]
|
||||
- DIST_HUBER
|
||||
\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
|
||||
|
||||
The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
|
||||
that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
|
||||
weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DMat"
|
||||
|
||||
@param src Input set of 3D points stored in one of possible containers: Mat,
|
||||
std::vector<cv::Point3i>, std::vector<cv::Point3f>, std::vector<cv::Point3d>.
|
||||
|
||||
@note In case of an N-dimentional points' set given, Mat should be 2-dimensional, have a single row
|
||||
or column if there are N channels, or have N columns if there is a single channel.
|
||||
|
||||
@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
|
||||
and @ref DIST_C are not suppored.
|
||||
@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
|
||||
is chosen.
|
||||
@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
|
||||
line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
|
||||
@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
|
||||
If it is 0, a default value is chosen.
|
||||
|
||||
@return Output line parameters: a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0),
|
||||
where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on
|
||||
the line.
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GMat& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
/** @overload
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32S"
|
||||
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3i>& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
/** @overload
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32F"
|
||||
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3f>& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
/** @overload
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector64F"
|
||||
|
||||
*/
|
||||
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3d>& src, const DistanceTypes distType,
|
||||
const double param = 0., const double reps = 0.,
|
||||
const double aeps = 0.);
|
||||
|
||||
//! @} gapi_shape
|
||||
|
||||
//! @addtogroup gapi_colorconvert
|
||||
//! @{
|
||||
/** @brief Converts an image from BGR color space to RGB color space.
|
||||
|
||||
The function converts an input image from BGR color space to RGB.
|
||||
The conventional ranges for B, G, and R channel values are 0 to 255.
|
||||
|
||||
Output image is 8-bit unsigned 3-channel image @ref CV_8UC3.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2rgb"
|
||||
|
||||
@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
|
||||
@sa RGB2BGR
|
||||
*/
|
||||
GAPI_EXPORTS GMat BGR2RGB(const GMat& src);
|
||||
|
||||
/** @brief Converts an image from RGB color space to gray-scaled.
|
||||
The conventional ranges for R, G, and B channel values are 0 to 255.
|
||||
Resulting gray color value computed as
|
||||
@@ -735,6 +1395,70 @@ Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
|
||||
*/
|
||||
GAPI_EXPORTS GMat RGB2YUV(const GMat& src);
|
||||
|
||||
/** @brief Converts an image from BGR color space to I420 color space.
|
||||
|
||||
The function converts an input image from BGR color space to I420.
|
||||
The conventional ranges for R, G, and B channel values are 0 to 255.
|
||||
|
||||
Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1.
|
||||
Width of I420 output image must be the same as width of input image.
|
||||
Height of I420 output image must be equal 3/2 from height of input image.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2i420"
|
||||
|
||||
@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
|
||||
@sa I4202BGR
|
||||
*/
|
||||
GAPI_EXPORTS GMat BGR2I420(const GMat& src);
|
||||
|
||||
/** @brief Converts an image from RGB color space to I420 color space.
|
||||
|
||||
The function converts an input image from RGB color space to I420.
|
||||
The conventional ranges for R, G, and B channel values are 0 to 255.
|
||||
|
||||
Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1.
|
||||
Width of I420 output image must be the same as width of input image.
|
||||
Height of I420 output image must be equal 3/2 from height of input image.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2i420"
|
||||
|
||||
@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
|
||||
@sa I4202RGB
|
||||
*/
|
||||
GAPI_EXPORTS GMat RGB2I420(const GMat& src);
|
||||
|
||||
/** @brief Converts an image from I420 color space to BGR color space.
|
||||
|
||||
The function converts an input image from I420 color space to BGR.
|
||||
The conventional ranges for B, G, and R channel values are 0 to 255.
|
||||
|
||||
Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3.
|
||||
Width of BGR output image must be the same as width of input image.
|
||||
Height of BGR output image must be equal 2/3 from height of input image.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202bgr"
|
||||
|
||||
@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
|
||||
@sa BGR2I420
|
||||
*/
|
||||
GAPI_EXPORTS GMat I4202BGR(const GMat& src);
|
||||
|
||||
/** @brief Converts an image from I420 color space to BGR color space.
|
||||
|
||||
The function converts an input image from I420 color space to BGR.
|
||||
The conventional ranges for B, G, and R channel values are 0 to 255.
|
||||
|
||||
Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3.
|
||||
Width of RGB output image must be the same as width of input image.
|
||||
Height of RGB output image must be equal 2/3 from height of input image.
|
||||
|
||||
@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202rgb"
|
||||
|
||||
@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
|
||||
@sa RGB2I420
|
||||
*/
|
||||
GAPI_EXPORTS GMat I4202RGB(const GMat& src);
|
||||
|
||||
/** @brief Converts an image from BGR color space to LUV color space.
|
||||
|
||||
The function converts an input image from BGR color space to LUV.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2019 Intel Corporation
|
||||
// Copyright (C) 2019-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_INFER_HPP
|
||||
@@ -14,6 +14,7 @@
|
||||
#include <functional>
|
||||
#include <string> // string
|
||||
#include <utility> // tuple
|
||||
#include <type_traits> // is_same, false_type
|
||||
|
||||
#include <opencv2/gapi/util/any.hpp> // any<>
|
||||
#include <opencv2/gapi/gkernel.hpp> // GKernelType[M], GBackend
|
||||
@@ -25,6 +26,43 @@ namespace cv {
|
||||
|
||||
template<typename, typename> class GNetworkType;
|
||||
|
||||
namespace detail {
|
||||
template<typename, typename>
|
||||
struct valid_infer2_types;
|
||||
|
||||
// Terminal case 1 (50/50 success)
|
||||
template<typename T>
|
||||
struct valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> > {
|
||||
// By default, Nets are limited to GMat argument types only
|
||||
// for infer2, every GMat argument may translate to either
|
||||
// GArray<GMat> or GArray<Rect>. GArray<> part is stripped
|
||||
// already at this point.
|
||||
static constexpr const auto value =
|
||||
std::is_same<typename std::decay<T>::type, cv::GMat>::value
|
||||
|| std::is_same<typename std::decay<T>::type, cv::Rect>::value;
|
||||
};
|
||||
|
||||
// Terminal case 2 (100% failure)
|
||||
template<typename... Ts>
|
||||
struct valid_infer2_types< std::tuple<>, std::tuple<Ts...> >
|
||||
: public std::false_type {
|
||||
};
|
||||
|
||||
// Terminal case 3 (100% failure)
|
||||
template<typename... Ns>
|
||||
struct valid_infer2_types< std::tuple<Ns...>, std::tuple<> >
|
||||
: public std::false_type {
|
||||
};
|
||||
|
||||
// Recursion -- generic
|
||||
template<typename... Ns, typename T, typename...Ts>
|
||||
struct valid_infer2_types< std::tuple<cv::GMat,Ns...>, std::tuple<T,Ts...> > {
|
||||
static constexpr const auto value =
|
||||
valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> >::value
|
||||
&& valid_infer2_types< std::tuple<Ns...>, std::tuple<Ts...> >::value;
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
// TODO: maybe tuple_wrap_helper from util.hpp may help with this.
|
||||
// Multiple-return-value network definition (specialized base class)
|
||||
template<typename K, typename... R, typename... Args>
|
||||
@@ -39,6 +77,9 @@ public:
|
||||
|
||||
using ResultL = std::tuple< cv::GArray<R>... >;
|
||||
using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
|
||||
|
||||
// FIXME: Args... must be limited to a single GMat
|
||||
using APIRoi = std::function<Result(cv::GOpaque<cv::Rect>, Args...)>;
|
||||
};
|
||||
|
||||
// Single-return-value network definition (specialized base class)
|
||||
@@ -54,6 +95,20 @@ public:
|
||||
|
||||
using ResultL = cv::GArray<R>;
|
||||
using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
|
||||
|
||||
// FIXME: Args... must be limited to a single GMat
|
||||
using APIRoi = std::function<Result(cv::GOpaque<cv::Rect>, Args...)>;
|
||||
};
|
||||
|
||||
// APIList2 is also template to allow different calling options
|
||||
// (GArray<cv::Rect> vs GArray<cv::GMat> per input)
|
||||
template<class Net, class... Ts>
|
||||
struct InferAPIList2 {
|
||||
using type = typename std::enable_if
|
||||
< cv::detail::valid_infer2_types< typename Net::InArgs
|
||||
, std::tuple<Ts...> >::value,
|
||||
std::function<typename Net::ResultL(cv::GMat, cv::GArray<Ts>...)>
|
||||
>::type;
|
||||
};
|
||||
|
||||
// Base "Infer" kernel. Note - for whatever network, kernel ID
|
||||
@@ -65,22 +120,86 @@ public:
|
||||
// a particular backend, not by a network itself.
|
||||
struct GInferBase {
|
||||
static constexpr const char * id() {
|
||||
return "org.opencv.dnn.infer"; // Universal stub
|
||||
return "org.opencv.dnn.infer"; // Universal stub
|
||||
}
|
||||
static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
|
||||
return GMetaArgs{}; // One more universal stub
|
||||
return GMetaArgs{}; // One more universal stub
|
||||
}
|
||||
};
|
||||
|
||||
// Struct stores network input/output names.
|
||||
// Used by infer<Generic>
|
||||
struct InOutInfo
|
||||
{
|
||||
std::vector<std::string> in_names;
|
||||
std::vector<std::string> out_names;
|
||||
};
|
||||
|
||||
/**
|
||||
* @{
|
||||
* @brief G-API object used to collect network inputs
|
||||
*/
|
||||
class GAPI_EXPORTS_W_SIMPLE GInferInputs
|
||||
{
|
||||
using Map = std::unordered_map<std::string, GMat>;
|
||||
public:
|
||||
GAPI_WRAP GInferInputs();
|
||||
GAPI_WRAP void setInput(const std::string& name, const cv::GMat& value);
|
||||
|
||||
cv::GMat& operator[](const std::string& name);
|
||||
const Map& getBlobs() const;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Map> in_blobs;
|
||||
};
|
||||
/** @} */
|
||||
|
||||
/**
|
||||
* @{
|
||||
* @brief G-API object used to collect network outputs
|
||||
*/
|
||||
struct GAPI_EXPORTS_W_SIMPLE GInferOutputs
|
||||
{
|
||||
public:
|
||||
GAPI_WRAP GInferOutputs() = default;
|
||||
GInferOutputs(std::shared_ptr<cv::GCall> call);
|
||||
GAPI_WRAP cv::GMat at(const std::string& name);
|
||||
|
||||
private:
|
||||
struct Priv;
|
||||
std::shared_ptr<Priv> m_priv;
|
||||
};
|
||||
/** @} */
|
||||
// Base "InferROI" kernel.
|
||||
// All notes from "Infer" kernel apply here as well.
|
||||
struct GInferROIBase {
|
||||
static constexpr const char * id() {
|
||||
return "org.opencv.dnn.infer-roi"; // Universal stub
|
||||
}
|
||||
static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
|
||||
return GMetaArgs{}; // One more universal stub
|
||||
}
|
||||
};
|
||||
|
||||
// Base "Infer list" kernel.
|
||||
// All notes from "Infer" kernel apply here as well.
|
||||
struct GInferListBase {
|
||||
static constexpr const char * id() {
|
||||
return "org.opencv.dnn.infer-roi"; // Universal stub
|
||||
return "org.opencv.dnn.infer-roi-list-1"; // Universal stub
|
||||
}
|
||||
static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
|
||||
return GMetaArgs{}; // One more universal stub
|
||||
return GMetaArgs{}; // One more universal stub
|
||||
}
|
||||
};
|
||||
|
||||
// Base "Infer list 2" kernel.
|
||||
// All notes from "Infer" kernel apply here as well.
|
||||
struct GInferList2Base {
|
||||
static constexpr const char * id() {
|
||||
return "org.opencv.dnn.infer-roi-list-2"; // Universal stub
|
||||
}
|
||||
static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
|
||||
return GMetaArgs{}; // One more universal stub
|
||||
}
|
||||
};
|
||||
|
||||
@@ -97,6 +216,19 @@ struct GInfer final
|
||||
static constexpr const char* tag() { return Net::tag(); }
|
||||
};
|
||||
|
||||
// A specific roi-inference kernel. API (::on()) is fixed here and
|
||||
// verified against Net.
|
||||
template<typename Net>
|
||||
struct GInferROI final
|
||||
: public GInferROIBase
|
||||
, public detail::KernelTypeMedium< GInferROI<Net>
|
||||
, typename Net::APIRoi > {
|
||||
using GInferROIBase::getOutMeta; // FIXME: name lookup conflict workaround?
|
||||
|
||||
static constexpr const char* tag() { return Net::tag(); }
|
||||
};
|
||||
|
||||
|
||||
// A generic roi-list inference kernel. API (::on()) is derived from
|
||||
// the Net template parameter (see more in infer<> overload).
|
||||
template<typename Net>
|
||||
@@ -109,6 +241,21 @@ struct GInferList final
|
||||
static constexpr const char* tag() { return Net::tag(); }
|
||||
};
|
||||
|
||||
// An even more generic roi-list inference kernel. API (::on()) is
|
||||
// derived from the Net template parameter (see more in infer<>
|
||||
// overload).
|
||||
// Takes an extra variadic template list to reflect how this network
|
||||
// was called (with Rects or GMats as array parameters)
|
||||
template<typename Net, typename... Args>
|
||||
struct GInferList2 final
|
||||
: public GInferList2Base
|
||||
, public detail::KernelTypeMedium< GInferList2<Net, Args...>
|
||||
, typename InferAPIList2<Net, Args...>::type > {
|
||||
using GInferList2Base::getOutMeta; // FIXME: name lookup conflict workaround?
|
||||
|
||||
static constexpr const char* tag() { return Net::tag(); }
|
||||
};
|
||||
|
||||
} // namespace cv
|
||||
|
||||
// FIXME: Probably the <API> signature makes a function/tuple/function round-trip
|
||||
@@ -120,6 +267,23 @@ struct GInferList final
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
|
||||
/** @brief Calculates response for the specified network (template
|
||||
* parameter) for the specified region in the source image.
|
||||
* Currently expects a single-input network only.
|
||||
*
|
||||
* @tparam A network type defined with G_API_NET() macro.
|
||||
* @param in input image where to take ROI from.
|
||||
* @param roi an object describing the region of interest
|
||||
* in the source image. May be calculated in the same graph dynamically.
|
||||
* @return an object of return type as defined in G_API_NET().
|
||||
* If a network has multiple return values (defined with a tuple), a tuple of
|
||||
* objects of appropriate type is returned.
|
||||
* @sa G_API_NET()
|
||||
*/
|
||||
template<typename Net>
|
||||
typename Net::Result infer(cv::GOpaque<cv::Rect> roi, cv::GMat in) {
|
||||
return GInferROI<Net>::on(roi, in);
|
||||
}
|
||||
|
||||
/** @brief Calculates responses for the specified network (template
|
||||
* parameter) for every region in the source image.
|
||||
@@ -139,6 +303,30 @@ typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
|
||||
return GInferList<Net>::on(roi, std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
/** @brief Calculates responses for the specified network (template
|
||||
* parameter) for every region in the source image, extended version.
|
||||
*
|
||||
* @tparam A network type defined with G_API_NET() macro.
|
||||
* @param image A source image containing regions of interest
|
||||
* @param args GArray<> objects of cv::Rect or cv::GMat, one per every
|
||||
* network input:
|
||||
* - If a cv::GArray<cv::Rect> is passed, the appropriate
|
||||
* regions are taken from `image` and preprocessed to this particular
|
||||
* network input;
|
||||
* - If a cv::GArray<cv::GMat> is passed, the underlying data traited
|
||||
* as tensor (no automatic preprocessing happen).
|
||||
* @return a list of objects of return type as defined in G_API_NET().
|
||||
* If a network has multiple return values (defined with a tuple), a tuple of
|
||||
* GArray<> objects is returned with the appropriate types inside.
|
||||
* @sa G_API_NET()
|
||||
*/
|
||||
template<typename Net, typename... Args>
|
||||
typename Net::ResultL infer2(cv::GMat image, cv::GArray<Args>... args) {
|
||||
// FIXME: Declared as "2" because in the current form it steals
|
||||
// overloads from the regular infer
|
||||
return GInferList2<Net, Args...>::on(image, args...);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Calculates response for the specified network (template
|
||||
* parameter) given the input data.
|
||||
@@ -155,6 +343,51 @@ typename Net::Result infer(Args&&... args) {
|
||||
return GInfer<Net>::on(std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Special network type
|
||||
*/
|
||||
struct Generic { };
|
||||
|
||||
/**
|
||||
* @brief Calculates response for generic network
|
||||
*
|
||||
* @param tag a network tag
|
||||
* @param inputs networks's inputs
|
||||
* @return a GInferOutputs
|
||||
*/
|
||||
template<typename T = Generic> GInferOutputs
|
||||
infer(const std::string& tag, const GInferInputs& inputs)
|
||||
{
|
||||
std::vector<GArg> input_args;
|
||||
std::vector<std::string> input_names;
|
||||
|
||||
const auto& blobs = inputs.getBlobs();
|
||||
for (auto&& p : blobs)
|
||||
{
|
||||
input_names.push_back(p.first);
|
||||
input_args.emplace_back(p.second);
|
||||
}
|
||||
|
||||
GKinds kinds(blobs.size(), cv::detail::OpaqueKind::CV_MAT);
|
||||
auto call = std::make_shared<cv::GCall>(GKernel{
|
||||
GInferBase::id(),
|
||||
tag,
|
||||
GInferBase::getOutMeta,
|
||||
{}, // outShape will be filled later
|
||||
std::move(kinds),
|
||||
{}, // outCtors will be filled later
|
||||
});
|
||||
|
||||
call->setArgs(std::move(input_args));
|
||||
call->params() = InOutInfo{input_names, {}};
|
||||
|
||||
return GInferOutputs{std::move(call)};
|
||||
}
|
||||
|
||||
GAPI_EXPORTS_W inline GInferOutputs infer(const String& name, const GInferInputs& inputs)
|
||||
{
|
||||
return infer<Generic>(name, inputs);
|
||||
}
|
||||
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
@@ -184,8 +417,8 @@ struct GAPI_EXPORTS GNetParam {
|
||||
*
|
||||
* @sa cv::gapi::networks
|
||||
*/
|
||||
struct GAPI_EXPORTS GNetPackage {
|
||||
GNetPackage() : GNetPackage({}) {}
|
||||
struct GAPI_EXPORTS_W_SIMPLE GNetPackage {
|
||||
GAPI_WRAP GNetPackage() : GNetPackage({}) {}
|
||||
explicit GNetPackage(std::initializer_list<GNetParam> &&ii);
|
||||
std::vector<GBackend> backends() const;
|
||||
std::vector<GNetParam> networks;
|
||||
|
||||
56
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp
vendored
Normal file
56
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2020 Intel Corporation
|
||||
|
||||
#ifndef OPENCV_GAPI_INFER_BINDINGS_IE_HPP
|
||||
#define OPENCV_GAPI_INFER_BINDINGS_IE_HPP
|
||||
|
||||
#include <opencv2/gapi/util/any.hpp>
|
||||
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
|
||||
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
|
||||
#include <opencv2/gapi/infer/ie.hpp> // Params
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
namespace ie {
|
||||
|
||||
// NB: Used by python wrapper
|
||||
// This class can be marked as SIMPLE, because it's implemented as pimpl
|
||||
class GAPI_EXPORTS_W_SIMPLE PyParams {
|
||||
public:
|
||||
PyParams() = default;
|
||||
|
||||
PyParams(const std::string &tag,
|
||||
const std::string &model,
|
||||
const std::string &weights,
|
||||
const std::string &device);
|
||||
|
||||
PyParams(const std::string &tag,
|
||||
const std::string &model,
|
||||
const std::string &device);
|
||||
|
||||
GBackend backend() const;
|
||||
std::string tag() const;
|
||||
cv::util::any params() const;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Params<cv::gapi::Generic>> m_priv;
|
||||
};
|
||||
|
||||
GAPI_EXPORTS_W PyParams params(const std::string &tag,
|
||||
const std::string &model,
|
||||
const std::string &weights,
|
||||
const std::string &device);
|
||||
|
||||
GAPI_EXPORTS_W PyParams params(const std::string &tag,
|
||||
const std::string &model,
|
||||
const std::string &device);
|
||||
} // namespace ie
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_INFER_BINDINGS_IE_HPP
|
||||
@@ -11,12 +11,14 @@
|
||||
#include <string>
|
||||
#include <array>
|
||||
#include <tuple> // tuple, tuple_size
|
||||
#include <map>
|
||||
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
#include <opencv2/gapi/util/any.hpp>
|
||||
|
||||
#include <opencv2/core/cvdef.h> // GAPI_EXPORTS
|
||||
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
|
||||
#include <opencv2/gapi/infer.hpp> // Generic
|
||||
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
@@ -41,6 +43,8 @@ enum class TraitAs: int
|
||||
IMAGE //!< G-API traits an associated cv::Mat as an image so creates an "image" blob (NCHW/NHWC, etc)
|
||||
};
|
||||
|
||||
using IEConfig = std::map<std::string, std::string>;
|
||||
|
||||
namespace detail {
|
||||
struct ParamDesc {
|
||||
std::string model_path;
|
||||
@@ -58,6 +62,11 @@ namespace detail {
|
||||
// (e.g. topology's partial execution)
|
||||
std::size_t num_in; // How many inputs are defined in the operation
|
||||
std::size_t num_out; // How many outputs are defined in the operation
|
||||
|
||||
enum class Kind { Load, Import };
|
||||
Kind kind;
|
||||
bool is_generic;
|
||||
IEConfig config;
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
@@ -78,9 +87,21 @@ public:
|
||||
const std::string &weights,
|
||||
const std::string &device)
|
||||
: desc{ model, weights, device, {}, {}, {}
|
||||
, std::tuple_size<typename Net::InArgs>::value
|
||||
, std::tuple_size<typename Net::OutArgs>::value
|
||||
} {
|
||||
, std::tuple_size<typename Net::InArgs>::value // num_in
|
||||
, std::tuple_size<typename Net::OutArgs>::value // num_out
|
||||
, detail::ParamDesc::Kind::Load
|
||||
, false
|
||||
, {}} {
|
||||
};
|
||||
|
||||
Params(const std::string &model,
|
||||
const std::string &device)
|
||||
: desc{ model, {}, device, {}, {}, {}
|
||||
, std::tuple_size<typename Net::InArgs>::value // num_in
|
||||
, std::tuple_size<typename Net::OutArgs>::value // num_out
|
||||
, detail::ParamDesc::Kind::Import
|
||||
, false
|
||||
, {}} {
|
||||
};
|
||||
|
||||
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
|
||||
@@ -106,18 +127,65 @@ public:
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params& pluginConfig(IEConfig&& cfg) {
|
||||
desc.config = std::move(cfg);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params& pluginConfig(const IEConfig& cfg) {
|
||||
desc.config = cfg;
|
||||
return *this;
|
||||
}
|
||||
|
||||
// BEGIN(G-API's network parametrization API)
|
||||
GBackend backend() const { return cv::gapi::ie::backend(); }
|
||||
std::string tag() const { return Net::tag(); }
|
||||
cv::util::any params() const { return { desc }; }
|
||||
GBackend backend() const { return cv::gapi::ie::backend(); }
|
||||
std::string tag() const { return Net::tag(); }
|
||||
cv::util::any params() const { return { desc }; }
|
||||
// END(G-API's network parametrization API)
|
||||
|
||||
protected:
|
||||
detail::ParamDesc desc;
|
||||
};
|
||||
|
||||
template<>
|
||||
class Params<cv::gapi::Generic> {
|
||||
public:
|
||||
Params(const std::string &tag,
|
||||
const std::string &model,
|
||||
const std::string &weights,
|
||||
const std::string &device)
|
||||
: desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true, {}}, m_tag(tag) {
|
||||
};
|
||||
|
||||
Params(const std::string &tag,
|
||||
const std::string &model,
|
||||
const std::string &device)
|
||||
: desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true, {}}, m_tag(tag) {
|
||||
};
|
||||
|
||||
Params& pluginConfig(IEConfig&& cfg) {
|
||||
desc.config = std::move(cfg);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params& pluginConfig(const IEConfig& cfg) {
|
||||
desc.config = cfg;
|
||||
return *this;
|
||||
}
|
||||
|
||||
// BEGIN(G-API's network parametrization API)
|
||||
GBackend backend() const { return cv::gapi::ie::backend(); }
|
||||
std::string tag() const { return m_tag; }
|
||||
cv::util::any params() const { return { desc }; }
|
||||
// END(G-API's network parametrization API)
|
||||
|
||||
protected:
|
||||
detail::ParamDesc desc;
|
||||
std::string m_tag;
|
||||
};
|
||||
|
||||
} // namespace ie
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_INFER_HPP
|
||||
#endif // OPENCV_GAPI_INFER_IE_HPP
|
||||
|
||||
138
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/onnx.hpp
vendored
Normal file
138
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/onnx.hpp
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2020 Intel Corporation
|
||||
|
||||
#ifndef OPENCV_GAPI_INFER_ONNX_HPP
|
||||
#define OPENCV_GAPI_INFER_ONNX_HPP
|
||||
|
||||
#include <unordered_map>
|
||||
#include <string>
|
||||
#include <array>
|
||||
#include <tuple> // tuple, tuple_size
|
||||
|
||||
#include <opencv2/gapi/opencv_includes.hpp>
|
||||
#include <opencv2/gapi/util/any.hpp>
|
||||
|
||||
#include <opencv2/core/cvdef.h> // GAPI_EXPORTS
|
||||
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
|
||||
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
namespace onnx {
|
||||
|
||||
GAPI_EXPORTS cv::gapi::GBackend backend();
|
||||
|
||||
enum class TraitAs: int {
|
||||
TENSOR, //!< G-API traits an associated cv::Mat as a raw tensor
|
||||
// and passes dimensions as-is
|
||||
IMAGE //!< G-API traits an associated cv::Mat as an image so
|
||||
// creates an "image" blob (NCHW/NHWC, etc)
|
||||
};
|
||||
|
||||
using PostProc = std::function<void(const std::unordered_map<std::string, cv::Mat> &,
|
||||
std::unordered_map<std::string, cv::Mat> &)>;
|
||||
|
||||
|
||||
namespace detail {
|
||||
struct ParamDesc {
|
||||
std::string model_path;
|
||||
|
||||
// NB: nun_* may differ from topology's real input/output port numbers
|
||||
// (e.g. topology's partial execution)
|
||||
std::size_t num_in; // How many inputs are defined in the operation
|
||||
std::size_t num_out; // How many outputs are defined in the operation
|
||||
|
||||
// NB: Here order follows the `Net` API
|
||||
std::vector<std::string> input_names;
|
||||
std::vector<std::string> output_names;
|
||||
|
||||
using ConstInput = std::pair<cv::Mat, TraitAs>;
|
||||
std::unordered_map<std::string, ConstInput> const_inputs;
|
||||
|
||||
std::vector<cv::Scalar> mean;
|
||||
std::vector<cv::Scalar> stdev;
|
||||
|
||||
std::vector<cv::GMatDesc> out_metas;
|
||||
PostProc custom_post_proc;
|
||||
|
||||
std::vector<bool> normalize;
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
template<typename Net>
|
||||
struct PortCfg {
|
||||
using In = std::array
|
||||
< std::string
|
||||
, std::tuple_size<typename Net::InArgs>::value >;
|
||||
using Out = std::array
|
||||
< std::string
|
||||
, std::tuple_size<typename Net::OutArgs>::value >;
|
||||
using NormCoefs = std::array
|
||||
< cv::Scalar
|
||||
, std::tuple_size<typename Net::InArgs>::value >;
|
||||
using Normalize = std::array
|
||||
< bool
|
||||
, std::tuple_size<typename Net::InArgs>::value >;
|
||||
};
|
||||
|
||||
template<typename Net> class Params {
|
||||
public:
|
||||
Params(const std::string &model) {
|
||||
desc.model_path = model;
|
||||
desc.num_in = std::tuple_size<typename Net::InArgs>::value;
|
||||
desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
|
||||
};
|
||||
|
||||
// BEGIN(G-API's network parametrization API)
|
||||
GBackend backend() const { return cv::gapi::onnx::backend(); }
|
||||
std::string tag() const { return Net::tag(); }
|
||||
cv::util::any params() const { return { desc }; }
|
||||
// END(G-API's network parametrization API)
|
||||
|
||||
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
|
||||
desc.input_names.assign(ll.begin(), ll.end());
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &ll) {
|
||||
desc.output_names.assign(ll.begin(), ll.end());
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params<Net>& constInput(const std::string &layer_name,
|
||||
const cv::Mat &data,
|
||||
TraitAs hint = TraitAs::TENSOR) {
|
||||
desc.const_inputs[layer_name] = {data, hint};
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params<Net>& cfgMeanStd(const typename PortCfg<Net>::NormCoefs &m,
|
||||
const typename PortCfg<Net>::NormCoefs &s) {
|
||||
desc.mean.assign(m.begin(), m.end());
|
||||
desc.stdev.assign(s.begin(), s.end());
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &outs,
|
||||
const PostProc &pp) {
|
||||
desc.out_metas = outs;
|
||||
desc.custom_post_proc = pp;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &n) {
|
||||
desc.normalize.assign(n.begin(), n.end());
|
||||
return *this;
|
||||
}
|
||||
|
||||
protected:
|
||||
detail::ParamDesc desc;
|
||||
};
|
||||
|
||||
} // namespace onnx
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_INFER_HPP
|
||||
137
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/parsers.hpp
vendored
Normal file
137
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/parsers.hpp
vendored
Normal file
@@ -0,0 +1,137 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_PARSERS_HPP
|
||||
#define OPENCV_GAPI_PARSERS_HPP
|
||||
|
||||
#include <utility> // std::tuple
|
||||
|
||||
#include <opencv2/gapi/gmat.hpp>
|
||||
#include <opencv2/gapi/gkernel.hpp>
|
||||
|
||||
namespace cv { namespace gapi {
|
||||
namespace nn {
|
||||
namespace parsers {
|
||||
using GRects = GArray<Rect>;
|
||||
using GDetections = std::tuple<GArray<Rect>, GArray<int>>;
|
||||
|
||||
G_TYPED_KERNEL(GParseSSDBL, <GDetections(GMat, GOpaque<Size>, float, int)>,
|
||||
"org.opencv.nn.parsers.parseSSD_BL") {
|
||||
static std::tuple<GArrayDesc,GArrayDesc> outMeta(const GMatDesc&, const GOpaqueDesc&, float, int) {
|
||||
return std::make_tuple(empty_array_desc(), empty_array_desc());
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GParseSSD, <GRects(GMat, GOpaque<Size>, float, bool, bool)>,
|
||||
"org.opencv.nn.parsers.parseSSD") {
|
||||
static GArrayDesc outMeta(const GMatDesc&, const GOpaqueDesc&, float, bool, bool) {
|
||||
return empty_array_desc();
|
||||
}
|
||||
};
|
||||
|
||||
G_TYPED_KERNEL(GParseYolo, <GDetections(GMat, GOpaque<Size>, float, float, std::vector<float>)>,
|
||||
"org.opencv.nn.parsers.parseYolo") {
|
||||
static std::tuple<GArrayDesc, GArrayDesc> outMeta(const GMatDesc&, const GOpaqueDesc&,
|
||||
float, float, const std::vector<float>&) {
|
||||
return std::make_tuple(empty_array_desc(), empty_array_desc());
|
||||
}
|
||||
static const std::vector<float>& defaultAnchors() {
|
||||
static std::vector<float> anchors {
|
||||
0.57273f, 0.677385f, 1.87446f, 2.06253f, 3.33843f, 5.47434f, 7.88282f, 3.52778f, 9.77052f, 9.16828f
|
||||
};
|
||||
return anchors;
|
||||
}
|
||||
};
|
||||
} // namespace parsers
|
||||
} // namespace nn
|
||||
|
||||
/** @brief Parses output of SSD network.
|
||||
|
||||
Extracts detection information (box, confidence, label) from SSD output and
|
||||
filters it by given confidence and label.
|
||||
|
||||
@note Function textual ID is "org.opencv.nn.parsers.parseSSD_BL"
|
||||
|
||||
@param in Input CV_32F tensor with {1,1,N,7} dimensions.
|
||||
@param inSz Size to project detected boxes to (size of the input image).
|
||||
@param confidenceThreshold If confidence of the
|
||||
detection is smaller than confidence threshold, detection is rejected.
|
||||
@param filterLabel If provided (!= -1), only detections with
|
||||
given label will get to the output.
|
||||
@return a tuple with a vector of detected boxes and a vector of appropriate labels.
|
||||
*/
|
||||
GAPI_EXPORTS std::tuple<GArray<Rect>, GArray<int>> parseSSD(const GMat& in,
|
||||
const GOpaque<Size>& inSz,
|
||||
const float confidenceThreshold = 0.5f,
|
||||
const int filterLabel = -1);
|
||||
|
||||
/** @overload
|
||||
Extracts detection information (box, confidence) from SSD output and
|
||||
filters it by given confidence and by going out of bounds.
|
||||
|
||||
@note Function textual ID is "org.opencv.nn.parsers.parseSSD"
|
||||
|
||||
@param in Input CV_32F tensor with {1,1,N,7} dimensions.
|
||||
@param inSz Size to project detected boxes to (size of the input image).
|
||||
@param confidenceThreshold If confidence of the
|
||||
detection is smaller than confidence threshold, detection is rejected.
|
||||
@param alignmentToSquare If provided true, bounding boxes are extended to squares.
|
||||
The center of the rectangle remains unchanged, the side of the square is
|
||||
the larger side of the rectangle.
|
||||
@param filterOutOfBounds If provided true, out-of-frame boxes are filtered.
|
||||
@return a vector of detected bounding boxes.
|
||||
*/
|
||||
GAPI_EXPORTS GArray<Rect> parseSSD(const GMat& in,
|
||||
const GOpaque<Size>& inSz,
|
||||
const float confidenceThreshold = 0.5f,
|
||||
const bool alignmentToSquare = false,
|
||||
const bool filterOutOfBounds = false);
|
||||
|
||||
/** @brief Parses output of Yolo network.
|
||||
|
||||
Extracts detection information (box, confidence, label) from Yolo output,
|
||||
filters it by given confidence and performs non-maximum supression for overlapping boxes.
|
||||
|
||||
@note Function textual ID is "org.opencv.nn.parsers.parseYolo"
|
||||
|
||||
@param in Input CV_32F tensor with {1,13,13,N} dimensions, N should satisfy:
|
||||
\f[\texttt{N} = (\texttt{num_classes} + \texttt{5}) * \texttt{5},\f]
|
||||
where num_classes - a number of classes Yolo network was trained with.
|
||||
@param inSz Size to project detected boxes to (size of the input image).
|
||||
@param confidenceThreshold If confidence of the
|
||||
detection is smaller than confidence threshold, detection is rejected.
|
||||
@param nmsThreshold Non-maximum supression threshold which controls minimum
|
||||
relative box intersection area required for rejecting the box with a smaller confidence.
|
||||
If 1.f, nms is not performed and no boxes are rejected.
|
||||
@param anchors Anchors Yolo network was trained with.
|
||||
@note The default anchor values are taken from openvinotoolkit docs:
|
||||
https://docs.openvinotoolkit.org/latest/omz_models_intel_yolo_v2_tiny_vehicle_detection_0001_description_yolo_v2_tiny_vehicle_detection_0001.html#output.
|
||||
@return a tuple with a vector of detected boxes and a vector of appropriate labels.
|
||||
*/
|
||||
GAPI_EXPORTS std::tuple<GArray<Rect>, GArray<int>> parseYolo(const GMat& in,
|
||||
const GOpaque<Size>& inSz,
|
||||
const float confidenceThreshold = 0.5f,
|
||||
const float nmsThreshold = 0.5f,
|
||||
const std::vector<float>& anchors
|
||||
= nn::parsers::GParseYolo::defaultAnchors());
|
||||
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
// Reimport parseSSD & parseYolo under their initial namespace
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
namespace streaming {
|
||||
|
||||
using cv::gapi::parseSSD;
|
||||
using cv::gapi::parseYolo;
|
||||
|
||||
} // namespace streaming
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_PARSERS_HPP
|
||||
73
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/media.hpp
vendored
Normal file
73
inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/media.hpp
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2020 Intel Corporation
|
||||
|
||||
#ifndef OPENCV_GAPI_MEDIA_HPP
|
||||
#define OPENCV_GAPI_MEDIA_HPP
|
||||
|
||||
#include <memory> // unique_ptr<>, shared_ptr<>
|
||||
#include <array> // array<>
|
||||
#include <functional> // function<>
|
||||
#include <utility> // forward<>()
|
||||
|
||||
#include <opencv2/gapi/gframe.hpp>
|
||||
|
||||
namespace cv {
|
||||
|
||||
class GAPI_EXPORTS MediaFrame {
|
||||
public:
|
||||
enum class Access { R, W };
|
||||
class IAdapter;
|
||||
class View;
|
||||
using AdapterPtr = std::unique_ptr<IAdapter>;
|
||||
|
||||
MediaFrame();
|
||||
explicit MediaFrame(AdapterPtr &&);
|
||||
template<class T, class... Args> static cv::MediaFrame Create(Args&&...);
|
||||
|
||||
View access(Access) const;
|
||||
cv::GFrameDesc desc() const;
|
||||
|
||||
private:
|
||||
struct Priv;
|
||||
std::shared_ptr<Priv> m;
|
||||
};
|
||||
|
||||
template<class T, class... Args>
|
||||
inline cv::MediaFrame cv::MediaFrame::Create(Args&&... args) {
|
||||
std::unique_ptr<T> ptr(new T(std::forward<Args>(args)...));
|
||||
return cv::MediaFrame(std::move(ptr));
|
||||
}
|
||||
|
||||
class GAPI_EXPORTS MediaFrame::View final {
|
||||
public:
|
||||
static constexpr const size_t MAX_PLANES = 4;
|
||||
using Ptrs = std::array<void*, MAX_PLANES>;
|
||||
using Strides = std::array<std::size_t, MAX_PLANES>; // in bytes
|
||||
using Callback = std::function<void()>;
|
||||
|
||||
View(Ptrs&& ptrs, Strides&& strs, Callback &&cb = [](){});
|
||||
View(const View&) = delete;
|
||||
View(View&&) = default;
|
||||
View& operator = (const View&) = delete;
|
||||
~View();
|
||||
|
||||
Ptrs ptr;
|
||||
Strides stride;
|
||||
|
||||
private:
|
||||
Callback m_cb;
|
||||
};
|
||||
|
||||
class GAPI_EXPORTS MediaFrame::IAdapter {
|
||||
public:
|
||||
virtual ~IAdapter() = 0;
|
||||
virtual cv::GFrameDesc meta() const = 0;
|
||||
virtual MediaFrame::View access(MediaFrame::Access) = 0;
|
||||
};
|
||||
|
||||
} //namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_MEDIA_HPP
|
||||
@@ -16,7 +16,7 @@ namespace gapi {
|
||||
namespace core {
|
||||
namespace ocl {
|
||||
|
||||
GAPI_EXPORTS GKernelPackage kernels();
|
||||
GAPI_EXPORTS_W cv::gapi::GKernelPackage kernels();
|
||||
|
||||
} // namespace ocl
|
||||
} // namespace core
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018-2019 Intel Corporation
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_GOCLKERNEL_HPP
|
||||
@@ -75,7 +75,7 @@ public:
|
||||
|
||||
protected:
|
||||
detail::VectorRef& outVecRef(int output);
|
||||
detail::VectorRef& outOpaqueRef(int output);
|
||||
detail::OpaqueRef& outOpaqueRef(int output);
|
||||
|
||||
std::vector<GArg> m_args;
|
||||
std::unordered_map<std::size_t, GRunArgP> m_results;
|
||||
|
||||
@@ -16,6 +16,17 @@
|
||||
# include <opencv2/core/base.hpp>
|
||||
#else // Without OpenCV
|
||||
# include <opencv2/gapi/own/cvdefs.hpp>
|
||||
# include <opencv2/gapi/own/types.hpp> // cv::gapi::own::Rect/Size/Point
|
||||
# include <opencv2/gapi/own/scalar.hpp> // cv::gapi::own::Scalar
|
||||
# include <opencv2/gapi/own/mat.hpp>
|
||||
// replacement of cv's structures:
|
||||
namespace cv {
|
||||
using Rect = gapi::own::Rect;
|
||||
using Size = gapi::own::Size;
|
||||
using Point = gapi::own::Point;
|
||||
using Scalar = gapi::own::Scalar;
|
||||
using Mat = gapi::own::Mat;
|
||||
} // namespace cv
|
||||
#endif // !defined(GAPI_STANDALONE)
|
||||
|
||||
#endif // OPENCV_GAPI_OPENCV_INCLUDES_HPP
|
||||
|
||||
@@ -11,6 +11,8 @@
|
||||
#include <opencv2/gapi/gmat.hpp>
|
||||
#include <opencv2/gapi/gscalar.hpp>
|
||||
|
||||
namespace cv
|
||||
{
|
||||
GAPI_EXPORTS cv::GMat operator+(const cv::GMat& lhs, const cv::GMat& rhs);
|
||||
|
||||
GAPI_EXPORTS cv::GMat operator+(const cv::GMat& lhs, const cv::GScalar& rhs);
|
||||
@@ -63,7 +65,6 @@ GAPI_EXPORTS cv::GMat operator<(const cv::GScalar& lhs, const cv::GMat& rhs);
|
||||
GAPI_EXPORTS cv::GMat operator<=(const cv::GScalar& lhs, const cv::GMat& rhs);
|
||||
GAPI_EXPORTS cv::GMat operator==(const cv::GScalar& lhs, const cv::GMat& rhs);
|
||||
GAPI_EXPORTS cv::GMat operator!=(const cv::GScalar& lhs, const cv::GMat& rhs);
|
||||
|
||||
|
||||
} // cv
|
||||
|
||||
#endif // OPENCV_GAPI_OPERATORS_HPP
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
namespace detail
|
||||
{
|
||||
inline void assert_abort(const char* str, int line, const char* file, const char* func)
|
||||
[[noreturn]] inline void assert_abort(const char* str, int line, const char* file, const char* func)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << file << ":" << line << ": Assertion " << str << " in function " << func << " failed\n";
|
||||
|
||||
@@ -25,23 +25,27 @@ namespace cv
|
||||
return result;
|
||||
}
|
||||
|
||||
cv::gapi::own::Mat to_own(Mat&&) = delete;
|
||||
cv::gapi::own::Mat to_own(Mat&&) = delete;
|
||||
|
||||
inline cv::gapi::own::Mat to_own(Mat const& m) {
|
||||
return (m.dims == 2)
|
||||
? cv::gapi::own::Mat{m.rows, m.cols, m.type(), m.data, m.step}
|
||||
: cv::gapi::own::Mat{to_own<int>(m.size), m.type(), m.data};
|
||||
};
|
||||
|
||||
namespace gapi
|
||||
{
|
||||
namespace own
|
||||
{
|
||||
|
||||
inline cv::Mat to_ocv(Mat const& m) {
|
||||
return m.dims.empty()
|
||||
? cv::Mat{m.rows, m.cols, m.type(), m.data, m.step}
|
||||
: cv::Mat{m.dims, m.type(), m.data};
|
||||
}
|
||||
cv::Mat to_ocv(Mat&&) = delete;
|
||||
|
||||
cv::Mat to_ocv(Mat&&) = delete;
|
||||
|
||||
} // namespace own
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
@@ -9,9 +9,6 @@
|
||||
#define OPENCV_GAPI_CV_DEFS_HPP
|
||||
|
||||
#if defined(GAPI_STANDALONE)
|
||||
#include <opencv2/gapi/own/types.hpp> // cv::gapi::own::Rect/Size/Point
|
||||
#include <opencv2/gapi/own/scalar.hpp> // cv::gapi::own::Scalar
|
||||
|
||||
// Simulate OpenCV definitions taken from various
|
||||
// OpenCV interface headers if G-API is built in a
|
||||
// standalone mode.
|
||||
@@ -139,11 +136,6 @@ enum InterpolationFlags{
|
||||
INTER_LINEAR_EXACT = 5,
|
||||
INTER_MAX = 7,
|
||||
};
|
||||
// replacement of cv's structures:
|
||||
using Rect = gapi::own::Rect;
|
||||
using Size = gapi::own::Size;
|
||||
using Point = gapi::own::Point;
|
||||
using Scalar = gapi::own::Scalar;
|
||||
} // namespace cv
|
||||
|
||||
static inline int cvFloor( double value )
|
||||
|
||||
@@ -11,8 +11,15 @@
|
||||
# if defined(__OPENCV_BUILD)
|
||||
# include <opencv2/core/base.hpp>
|
||||
# define GAPI_EXPORTS CV_EXPORTS
|
||||
/* special informative macros for wrapper generators */
|
||||
# define GAPI_WRAP CV_WRAP
|
||||
# define GAPI_EXPORTS_W_SIMPLE CV_EXPORTS_W_SIMPLE
|
||||
# define GAPI_EXPORTS_W CV_EXPORTS_W
|
||||
# else
|
||||
# define GAPI_WRAP
|
||||
# define GAPI_EXPORTS
|
||||
# define GAPI_EXPORTS_W_SIMPLE
|
||||
# define GAPI_EXPORTS_W
|
||||
|
||||
#if 0 // Note: the following version currently is not needed for non-OpenCV build
|
||||
# if defined _WIN32
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user