fluid update WW52 2020 OCV 4.5.1 - attempt#2 (#3698)

* fluid update WW52 2020 OCV 4.5.1 - attempt#2

* fluid update WW52 2020 OCV 4.5.1 - attempt#3 with build fixes

* fluid update WW52 2020 OCV 4.5.1 - attempt#3
This commit is contained in:
Dmitry Budnikov 2020-12-25 18:25:20 +03:00 committed by GitHub
parent a66ab37455
commit 0547934c24
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
186 changed files with 18516 additions and 1679 deletions

View File

@ -1 +1 @@
d8947b3280c8644f9828fac2b36f5f5a
91e7c0aaa00be504e8e6692d0b3b86c1

View File

@ -38,6 +38,10 @@ if(MSVC)
endif()
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") # don't add Clang here: issue should be investigated and fixed (workaround for Apple only)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wrange-loop-analysis) # https://github.com/opencv/opencv/issues/18928
endif()
file(GLOB gapi_ext_hdrs
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.hpp"
@ -49,6 +53,7 @@ file(GLOB gapi_ext_hdrs
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/ocl/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/own/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/render/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/s11n/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp"
@ -56,6 +61,7 @@ file(GLOB gapi_ext_hdrs
set(gapi_srcs
# Front-end part
src/api/grunarg.cpp
src/api/gorigin.cpp
src/api/gmat.cpp
src/api/garray.cpp
@ -73,10 +79,12 @@ set(gapi_srcs
src/api/kernels_imgproc.cpp
src/api/kernels_video.cpp
src/api/kernels_nnparsers.cpp
src/api/kernels_streaming.cpp
src/api/render.cpp
src/api/render_ocv.cpp
src/api/ginfer.cpp
src/api/ft_render.cpp
src/api/media.cpp
src/api/rmat.cpp
# Compiler part
src/compiler/gmodel.cpp
@ -95,9 +103,11 @@ set(gapi_srcs
src/compiler/passes/pattern_matching.cpp
src/compiler/passes/perform_substitution.cpp
src/compiler/passes/streaming.cpp
src/compiler/passes/intrin.cpp
# Executor
src/executor/gexecutor.cpp
src/executor/gtbbexecutor.cpp
src/executor/gstreamingexecutor.cpp
src/executor/gasync.cpp
@ -127,21 +137,31 @@ set(gapi_srcs
src/backends/ie/giebackend.cpp
src/backends/ie/giebackend/giewrapper.cpp
# Render Backend.
src/backends/render/grenderocvbackend.cpp
src/backends/render/grenderocv.cpp
# ONNX backend
src/backends/onnx/gonnxbackend.cpp
#PlaidML Backend
# Render backend
src/backends/render/grenderocv.cpp
src/backends/render/ft_render.cpp
# PlaidML Backend
src/backends/plaidml/gplaidmlcore.cpp
src/backends/plaidml/gplaidmlbackend.cpp
# Compound
# Common backend code
src/backends/common/gmetabackend.cpp
src/backends/common/gcompoundbackend.cpp
src/backends/common/gcompoundkernel.cpp
# Serialization API and routines
src/api/s11n.cpp
src/backends/common/serialization.cpp
# Streaming backend
src/backends/streaming/gstreamingbackend.cpp
# Python bridge
src/backends/ie/bindings_ie.cpp
)
ocv_add_dispatched_file(backends/fluid/gfluidimgproc_func SSE4_1 AVX2)
@ -180,6 +200,10 @@ if(TARGET opencv_test_gapi)
target_link_libraries(opencv_test_gapi PRIVATE ade)
endif()
if(HAVE_TBB AND TARGET opencv_test_gapi)
ocv_target_link_libraries(opencv_test_gapi PRIVATE tbb)
endif()
if(HAVE_FREETYPE)
ocv_target_compile_definitions(${the_module} PRIVATE -DHAVE_FREETYPE)
if(TARGET opencv_test_gapi)
@ -198,10 +222,20 @@ if(HAVE_PLAIDML)
ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${PLAIDML_INCLUDE_DIRS})
endif()
if(WIN32)
# Required for htonl/ntohl on Windows
ocv_target_link_libraries(${the_module} PRIVATE wsock32 ws2_32)
endif()
if(HAVE_ONNX)
ocv_target_link_libraries(${the_module} PRIVATE ${ONNX_LIBRARY})
ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX=1)
if(TARGET opencv_test_gapi)
ocv_target_compile_definitions(opencv_test_gapi PRIVATE HAVE_ONNX=1)
ocv_target_link_libraries(opencv_test_gapi PRIVATE ${ONNX_LIBRARY})
endif()
endif()
ocv_add_perf_tests()
ocv_add_samples()

View File

@ -15,10 +15,13 @@ file(GLOB FLUID_includes "${FLUID_ROOT}/include/opencv2/*.hpp"
"${FLUID_ROOT}/include/opencv2/gapi/own/*.hpp"
"${FLUID_ROOT}/include/opencv2/gapi/fluid/*.hpp")
file(GLOB FLUID_sources "${FLUID_ROOT}/src/api/g*.cpp"
"${FLUID_ROOT}/src/api/rmat.cpp"
"${FLUID_ROOT}/src/api/media.cpp"
"${FLUID_ROOT}/src/compiler/*.cpp"
"${FLUID_ROOT}/src/compiler/passes/*.cpp"
"${FLUID_ROOT}/src/executor/*.cpp"
"${FLUID_ROOT}/src/backends/fluid/*.cpp"
"${FLUID_ROOT}/src/backends/streaming/*.cpp"
"${FLUID_ROOT}/src/backends/common/*.cpp")
add_library(${FLUID_TARGET} STATIC ${FLUID_includes} ${FLUID_sources})

View File

@ -1,10 +1,10 @@
#+TITLE: OpenCV 4.0 Graph API
#+TITLE: OpenCV 4.4 Graph API
#+AUTHOR: Dmitry Matveev\newline Intel Corporation
#+OPTIONS: H:2 toc:t num:t
#+LATEX_CLASS: beamer
#+LATEX_CLASS_OPTIONS: [presentation]
#+LATEX_HEADER: \usepackage{transparent} \usepackage{listings} \usepackage{pgfplots} \usepackage{mtheme.sty/beamerthememetropolis}
#+LATEX_HEADER: \setbeamertemplate{frame footer}{OpenCV 4.0 G-API: Overview and programming by example}
#+LATEX_HEADER: \setbeamertemplate{frame footer}{OpenCV 4.4 G-API: Overview and programming by example}
#+BEAMER_HEADER: \subtitle{Overview and programming by example}
#+BEAMER_HEADER: \titlegraphic{ \vspace*{3cm}\hspace*{5cm} {\transparent{0.2}\includegraphics[height=\textheight]{ocv_logo.eps}}}
#+COLUMNS: %45ITEM %10BEAMER_ENV(Env) %10BEAMER_ACT(Act) %4BEAMER_COL(Col) %8BEAMER_OPT(Opt)
@ -21,7 +21,7 @@
- OpenCV meets C++, ~cv::Mat~ replaces ~IplImage*~;
*** Version 3.0: -- Welcome Transparent API (T-API)
*** Version 3.0 -- Welcome Transparent API (T-API)
- ~cv::UMat~ is introduced as a /transparent/ addition to
~cv::Mat~;
@ -32,7 +32,7 @@
** OpenCV evolution in one slide (cont'd)
# FIXME: Learn proper page-breaking!
*** Version 4.0: -- Welcome Graph API (G-API)
*** Version 4.0 -- Welcome Graph API (G-API)
- A new separate module (not a full library rewrite);
- A framework (or even a /meta/-framework);
@ -45,6 +45,24 @@
- Kernels can be written in unconstrained platform-native code;
- Halide can serve as a backend (one of many).
** OpenCV evolution in one slide (cont'd)
# FIXME: Learn proper page-breaking!
*** Version 4.2 -- New horizons
- Introduced in-graph inference via OpenVINO™ Toolkit;
- Introduced video-oriented Streaming execution mode;
- Extended focus from individual image processing to the full
application pipeline optimization.
*** Version 4.4 -- More on video
- Introduced a notion of stateful kernels;
- The road to object tracking, background subtraction, etc. in the
graph;
- Added more video-oriented operations (feature detection, Optical
flow).
** Why G-API?
*** Why introduce a new execution model?
@ -80,7 +98,7 @@
- *Heterogeneity* gets extra benefits like:
- Avoiding unnecessary data transfers;
- Shadowing transfer costs with parallel host co-execution;
- Increasing system throughput with frame-level pipelining.
- Improving system throughput with frame-level pipelining.
* Programming with G-API
@ -96,7 +114,34 @@
- What data objects are /inputs/ to the graph?
- What are its /outputs/?
** A code is worth a thousand words
** The code is worth a thousand words
:PROPERTIES:
:BEAMER_opt: shrink=42
:END:
#+BEGIN_SRC C++
#include <opencv2/gapi.hpp> // G-API framework header
#include <opencv2/gapi/imgproc.hpp> // cv::gapi::blur()
#include <opencv2/highgui.hpp> // cv::imread/imwrite
int main(int argc, char *argv[]) {
if (argc < 3) return 1;
cv::GMat in; // Express the graph:
cv::GMat out = cv::gapi::blur(in, cv::Size(3,3)); // `out` is a result of `blur` of `in`
cv::Mat in_mat = cv::imread(argv[1]); // Get the real data
cv::Mat out_mat; // Output buffer (may be empty)
cv::GComputation(cv::GIn(in), cv::GOut(out)) // Declare a graph from `in` to `out`
.apply(cv::gin(in_mat), cv::gout(out_mat)); // ...and run it immediately
cv::imwrite(argv[2], out_mat); // Save the result
return 0;
}
#+END_SRC
** The code is worth a thousand words
:PROPERTIES:
:BEAMER_opt: shrink=42
:END:
@ -161,7 +206,7 @@ int main(int argc, char *argv[]) {
}
#+END_SRC
** A code is worth a thousand words (cont'd)
** The code is worth a thousand words (cont'd)
# FIXME: sections!!!
*** What have we just learned?
@ -183,59 +228,82 @@ cv::GComputation(cv::GIn(...), cv::GOut(...))
** On data objects
Graph *protocol* defines what arguments a computation was defined on
(both inputs and outputs), and what are the *shapes* (or types) of
those arguments:
(both inputs and outputs), and what are the *shapes* (or types) of
those arguments:
| *Shape* | *Argument* | Size |
|-------------+------------------+-----------------------------|
| ~GMat~ | ~Mat~ | Static; defined during |
| | | graph compilation |
|-------------+------------------+-----------------------------|
| ~GScalar~ | ~Scalar~ | 4 x ~double~ |
|-------------+------------------+-----------------------------|
| ~GArray<T>~ | ~std::vector<T>~ | Dynamic; defined in runtime |
| *Shape* | *Argument* | Size |
|--------------+------------------+-----------------------------|
| ~GMat~ | ~Mat~ | Static; defined during |
| | | graph compilation |
|--------------+------------------+-----------------------------|
| ~GScalar~ | ~Scalar~ | 4 x ~double~ |
|--------------+------------------+-----------------------------|
| ~GArray<T>~ | ~std::vector<T>~ | Dynamic; defined in runtime |
|--------------+------------------+-----------------------------|
| ~GOpaque<T>~ | ~T~ | Static, ~sizeof(T)~ |
~GScalar~ may be value-initialized at construction time to allow
expressions like ~GMat a = 2*(b + 1)~.
** Customization example
** On operations and kernels
:PROPERTIES:
:BEAMER_opt: shrink=22
:END:
*** Tuning the execution
*** :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.45
:END:
- Graph execution model is defined by kernels which are used;
- Kernels can be specified in graph compilation arguments:
#+LaTeX: {\footnotesize
#+BEGIN_SRC C++
#include <opencv2/gapi/fluid/core.hpp>
#include <opencv2/gapi/fluid/imgproc.hpp>
...
auto pkg = gapi::combine(gapi::core::fluid::kernels(),
gapi::imgproc::fluid::kernels(),
cv::unite_policy::KEEP);
sobel.apply(in_mat, out_mat, compile_args(pkg));
#+END_SRC
#+LaTeX: }
- OpenCL backend can be used in the same way;
#+LaTeX: {\footnotesize
- *NOTE*: ~cv::unite_policy~ has been removed in OpenCV 4.1.1.
#+LaTeX: }
- Graphs are built with *Operations* over virtual *Data*;
- *Operations* define interfaces (literally);
- *Kernels* are implementations to *Operations* (like in OOP);
- An *Operation* is platform-agnostic, a *kernel* is not;
- *Kernels* are implemented for *Backends*, the latter provide
APIs to write kernels;
- Users can /add/ their *own* operations and kernels,
and also /redefine/ "standard" kernels their *own* way.
** Operations and Kernels
*** :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.45
:END:
*** Specifying a kernel package
#+BEGIN_SRC dot :file "000-ops-kernels.eps" :cmdline "-Kdot -Teps"
digraph G {
node [shape=box];
rankdir=BT;
- A *kernel* is an implementation of *operation* (= interface);
- A *kernel package* hosts kernels that G-API should use;
- Kernels are written for different *backends* and using their APIs;
- Two kernel packages can be *merged* into a single one;
- User can safely supply his *own kernels* to either /replace/ or
/augment/ the default package.
- Yes, even the standard kernels can be /overwritten/ by user from
the outside!
- *Heterogeneous* kernel package hosts kernels of different backends.
Gr [label="Graph"];
Op [label="Operation\nA"];
{rank=same
Impl1 [label="Kernel\nA:2"];
Impl2 [label="Kernel\nA:1"];
}
** Operations and Kernels (cont'd)
# FIXME!!!
Op -> Gr [dir=back, label="'consists of'"];
Impl1 -> Op [];
Impl2 -> Op [label="'is implemented by'"];
node [shape=note,style=dashed];
{rank=same
Op;
CommentOp [label="Abstract:\ndeclared via\nG_API_OP()"];
}
{rank=same
Comment1 [label="Platform:\ndefined with\nOpenCL backend"];
Comment2 [label="Platform:\ndefined with\nOpenCV backend"];
}
CommentOp -> Op [constraint=false, style=dashed, arrowhead=none];
Comment1 -> Impl1 [style=dashed, arrowhead=none];
Comment2 -> Impl2 [style=dashed, arrowhead=none];
}
#+END_SRC
** On operations and kernels (cont'd)
*** Defining an operation
@ -245,16 +313,43 @@ Graph *protocol* defines what arguments a computation was defined on
- Metadata callback -- describe what is the output value format(s),
given the input and arguments.
- Use ~OpType::on(...)~ to use a new kernel ~OpType~ to construct graphs.
#+LaTeX: {\footnotesize
#+BEGIN_SRC C++
G_TYPED_KERNEL(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
G_API_OP(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
static GMatDesc outMeta(GMatDesc in) { return in; }
};
#+END_SRC
#+LaTeX: }
** Operations and Kernels (cont'd)
# FIXME!!!
** On operations and kernels (cont'd)
*** ~GSqrt~ vs. ~cv::gapi::sqrt()~
- How does a *type* relate to the *functions* from the example?
- These functions are just wrappers over ~::on~:
#+LaTeX: {\scriptsize
#+BEGIN_SRC C++
G_API_OP(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
static GMatDesc outMeta(GMatDesc in) { return in; }
};
GMat gapi::sqrt(const GMat& src) { return GSqrt::on(src); }
#+END_SRC
#+LaTeX: }
- Why -- Doxygen, default parameters, 1:n mapping:
#+LaTeX: {\scriptsize
#+BEGIN_SRC C++
cv::GMat custom::unsharpMask(const cv::GMat &src,
const int sigma,
const float strength) {
cv::GMat blurred = cv::gapi::medianBlur(src, sigma);
cv::GMat laplacian = cv::gapi::Laplacian(blurred, CV_8U);
return (src - (laplacian * strength));
}
#+END_SRC
#+LaTeX: }
** On operations and kernels (cont'd)
*** Implementing an operation
@ -297,6 +392,467 @@ G_TYPED_KERNEL(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
- Note ~run~ changes signature but still is derived from the operation
signature.
** Operations and Kernels (cont'd)
*** Specifying which kernels to use
- Graph execution model is defined by kernels which are available/used;
- Kernels can be specified via the graph compilation arguments:
#+LaTeX: {\footnotesize
#+BEGIN_SRC C++
#include <opencv2/gapi/fluid/core.hpp>
#include <opencv2/gapi/fluid/imgproc.hpp>
...
auto pkg = cv::gapi::combine(cv::gapi::core::fluid::kernels(),
cv::gapi::imgproc::fluid::kernels());
sobel.apply(in_mat, out_mat, cv::compile_args(pkg));
#+END_SRC
#+LaTeX: }
- Users can combine kernels of different backends and G-API will partition
the execution among those automatically.
** Heterogeneity in G-API
:PROPERTIES:
:BEAMER_opt: shrink=35
:END:
*** Automatic subgraph partitioning in G-API
*** :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.18
:END:
#+BEGIN_SRC dot :file "010-hetero-init.eps" :cmdline "-Kdot -Teps"
digraph G {
rankdir=TB;
ranksep=0.3;
node [shape=box margin=0 height=0.25];
A; B; C;
node [shape=ellipse];
GMat0;
GMat1;
GMat2;
GMat3;
GMat0 -> A -> GMat1 -> B -> GMat2;
GMat2 -> C;
GMat0 -> C -> GMat3
subgraph cluster {style=invis; A; GMat1; B; GMat2; C};
}
#+END_SRC
The initial graph: operations are not resolved yet.
*** :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.18
:END:
#+BEGIN_SRC dot :file "011-hetero-homo.eps" :cmdline "-Kdot -Teps"
digraph G {
rankdir=TB;
ranksep=0.3;
node [shape=box margin=0 height=0.25];
A; B; C;
node [shape=ellipse];
GMat0;
GMat1;
GMat2;
GMat3;
GMat0 -> A -> GMat1 -> B -> GMat2;
GMat2 -> C;
GMat0 -> C -> GMat3
subgraph cluster {style=filled;color=azure2; A; GMat1; B; GMat2; C};
}
#+END_SRC
All operations are handled by the same backend.
*** :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.18
:END:
#+BEGIN_SRC dot :file "012-hetero-a.eps" :cmdline "-Kdot -Teps"
digraph G {
rankdir=TB;
ranksep=0.3;
node [shape=box margin=0 height=0.25];
A; B; C;
node [shape=ellipse];
GMat0;
GMat1;
GMat2;
GMat3;
GMat0 -> A -> GMat1 -> B -> GMat2;
GMat2 -> C;
GMat0 -> C -> GMat3
subgraph cluster_1 {style=filled;color=azure2; A; GMat1; B; }
subgraph cluster_2 {style=filled;color=ivory2; C};
}
#+END_SRC
~A~ & ~B~ are of backend ~1~, ~C~ is of backend ~2~.
*** :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.18
:END:
#+BEGIN_SRC dot :file "013-hetero-b.eps" :cmdline "-Kdot -Teps"
digraph G {
rankdir=TB;
ranksep=0.3;
node [shape=box margin=0 height=0.25];
A; B; C;
node [shape=ellipse];
GMat0;
GMat1;
GMat2;
GMat3;
GMat0 -> A -> GMat1 -> B -> GMat2;
GMat2 -> C;
GMat0 -> C -> GMat3
subgraph cluster_1 {style=filled;color=azure2; A};
subgraph cluster_2 {style=filled;color=ivory2; B};
subgraph cluster_3 {style=filled;color=azure2; C};
}
#+END_SRC
~A~ & ~C~ are of backend ~1~, ~B~ is of backend ~2~.
** Heterogeneity in G-API
*** Heterogeneity summary
- G-API automatically partitions its graph in subgraphs (called "islands")
based on the available kernels;
- Adjacent kernels taken from the same backend are "fused" into the same
"island";
- G-API implements a two-level execution model:
- Islands are executed at the top level by a G-API's *Executor*;
- Island internals are run at the bottom level by its *Backend*;
- G-API fully delegates the low-level execution and memory management to backends.
* Inference and Streaming
** Inference with G-API
*** In-graph inference example
- Starting with OpenCV 4.2 (2019), G-API allows integrating ~infer~
operations into the graph:
#+LaTeX: {\scriptsize
#+BEGIN_SRC C++
G_API_NET(ObjDetect, <cv::GMat(cv::GMat)>, "pdf.example.od");
cv::GMat bgr;
cv::GMat blob = cv::gapi::infer<ObjDetect>(bgr);
cv::GOpaque<cv::Size> size = cv::gapi::streaming::size(bgr);
cv::GArray<cv::Rect> objs = cv::gapi::streaming::parseSSD(blob, size);
cv::GComputation pipeline(cv::GIn(bgr), cv::GOut(objs));
#+END_SRC
#+LaTeX: }
- Starting with OpenCV 4.5 (2020), G-API will provide more streaming-
and NN-oriented operations out of the box.
** Inference with G-API
*** What is the difference?
- ~ObjDetect~ is not an operation, ~cv::gapi::infer<T>~ is;
- ~cv::gapi::infer<T>~ is a *generic* operation, where ~T=ObjDetect~ describes
the calling convention:
- How many inputs the network consumes,
- How many outputs the network produces.
- Inference data types are ~GMat~ only:
- Representing an image, then preprocessed automatically;
- Representing a blob (n-dimensional ~Mat~), then passed as-is.
- Inference *backends* only need to implement a single generic operation ~infer~.
** Inference with G-API
*** But how does it run?
- Since ~infer~ is an *Operation*, backends may provide *Kernels* implementing it;
- The only publicly available inference backend now is *OpenVINO™*:
- Brings its ~infer~ kernel atop of the Inference Engine;
- NN model data is passed through G-API compile arguments (like kernels);
- Every NN backend provides its own structure to configure the network (like
a kernel API).
** Inference with G-API
*** Passing OpenVINO™ parameters to G-API
- ~ObjDetect~ example:
#+LaTeX: {\footnotesize
#+BEGIN_SRC C++
auto face_net = cv::gapi::ie::Params<ObjDetect> {
face_xml_path, // path to the topology IR
face_bin_path, // path to the topology weights
face_device_string, // OpenVINO plugin (device) string
};
auto networks = cv::gapi::networks(face_net);
pipeline.compile(.., cv::compile_args(..., networks));
#+END_SRC
#+LaTeX: }
- ~AgeGender~ requires binding Op's outputs to NN layers:
#+LaTeX: {\footnotesize
#+BEGIN_SRC C++
auto age_net = cv::gapi::ie::Params<AgeGender> {
...
}.cfgOutputLayers({"age_conv3", "prob"}); // array<string,2> !
#+END_SRC
#+LaTeX: }
** Streaming with G-API
#+BEGIN_SRC dot :file 020-fd-demo.eps :cmdline "-Kdot -Teps"
digraph {
rankdir=LR;
node [shape=box];
cap [label=Capture];
dec [label=Decode];
res [label=Resize];
cnn [label=Infer];
vis [label=Visualize];
cap -> dec;
dec -> res;
res -> cnn;
cnn -> vis;
}
#+END_SRC
Anatomy of a regular video analytics application
** Streaming with G-API
#+BEGIN_SRC dot :file 021-fd-serial.eps :cmdline "-Kdot -Teps"
digraph {
node [shape=box margin=0 width=0.3 height=0.4]
nodesep=0.2;
rankdir=LR;
subgraph cluster0 {
colorscheme=blues9
pp [label="..." shape=plaintext];
v0 [label=V];
label="Frame N-1";
color=7;
}
subgraph cluster1 {
colorscheme=blues9
c1 [label=C];
d1 [label=D];
r1 [label=R];
i1 [label=I];
v1 [label=V];
label="Frame N";
color=6;
}
subgraph cluster2 {
colorscheme=blues9
c2 [label=C];
nn [label="..." shape=plaintext];
label="Frame N+1";
color=5;
}
c1 -> d1 -> r1 -> i1 -> v1;
pp-> v0;
v0 -> c1 [style=invis];
v1 -> c2 [style=invis];
c2 -> nn;
}
#+END_SRC
Serial execution of the sample video analytics application
** Streaming with G-API
:PROPERTIES:
:BEAMER_opt: shrink
:END:
#+BEGIN_SRC dot :file 022-fd-pipelined.eps :cmdline "-Kdot -Teps"
digraph {
nodesep=0.2;
ranksep=0.2;
node [margin=0 width=0.4 height=0.2];
node [shape=plaintext]
Camera [label="Camera:"];
GPU [label="GPU:"];
FPGA [label="FPGA:"];
CPU [label="CPU:"];
Time [label="Time:"];
t6 [label="T6"];
t7 [label="T7"];
t8 [label="T8"];
t9 [label="T9"];
t10 [label="T10"];
tnn [label="..."];
node [shape=box margin=0 width=0.4 height=0.4 colorscheme=blues9]
node [color=9] V3;
node [color=8] F4; V4;
node [color=7] DR5; F5; V5;
node [color=6] C6; DR6; F6; V6;
node [color=5] C7; DR7; F7; V7;
node [color=4] C8; DR8; F8;
node [color=3] C9; DR9;
node [color=2] C10;
{rank=same; rankdir=LR; Camera C6 C7 C8 C9 C10}
Camera -> C6 -> C7 -> C8 -> C9 -> C10 [style=invis];
{rank=same; rankdir=LR; GPU DR5 DR6 DR7 DR8 DR9}
GPU -> DR5 -> DR6 -> DR7 -> DR8 -> DR9 [style=invis];
C6 -> DR5 [style=invis];
C6 -> DR6 [constraint=false];
C7 -> DR7 [constraint=false];
C8 -> DR8 [constraint=false];
C9 -> DR9 [constraint=false];
{rank=same; rankdir=LR; FPGA F4 F5 F6 F7 F8}
FPGA -> F4 -> F5 -> F6 -> F7 -> F8 [style=invis];
DR5 -> F4 [style=invis];
DR5 -> F5 [constraint=false];
DR6 -> F6 [constraint=false];
DR7 -> F7 [constraint=false];
DR8 -> F8 [constraint=false];
{rank=same; rankdir=LR; CPU V3 V4 V5 V6 V7}
CPU -> V3 -> V4 -> V5 -> V6 -> V7 [style=invis];
F4 -> V3 [style=invis];
F4 -> V4 [constraint=false];
F5 -> V5 [constraint=false];
F6 -> V6 [constraint=false];
F7 -> V7 [constraint=false];
{rank=same; rankdir=LR; Time t6 t7 t8 t9 t10 tnn}
Time -> t6 -> t7 -> t8 -> t9 -> t10 -> tnn [style=invis];
CPU -> Time [style=invis];
V3 -> t6 [style=invis];
V4 -> t7 [style=invis];
V5 -> t8 [style=invis];
V6 -> t9 [style=invis];
V7 -> t10 [style=invis];
}
#+END_SRC
Pipelined execution for the video analytics application
** Streaming with G-API: Example
**** Serial mode (4.0) :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.45
:END:
#+LaTeX: {\tiny
#+BEGIN_SRC C++
pipeline = cv::GComputation(...);
cv::VideoCapture cap(input);
cv::Mat in_frame;
std::vector<cv::Rect> out_faces;
while (cap.read(in_frame)) {
pipeline.apply(cv::gin(in_frame),
cv::gout(out_faces),
cv::compile_args(kernels,
networks));
// Process results
...
}
#+END_SRC
#+LaTeX: }
**** Streaming mode (since 4.2) :B_block:BMCOL:
:PROPERTIES:
:BEAMER_env: block
:BEAMER_col: 0.45
:END:
#+LaTeX: {\tiny
#+BEGIN_SRC C++
pipeline = cv::GComputation(...);
auto in_src = cv::gapi::wip::make_src
<cv::gapi::wip::GCaptureSource>(input)
auto cc = pipeline.compileStreaming
(cv::compile_args(kernels, networks))
cc.setSource(cv::gin(in_src));
cc.start();
std::vector<cv::Rect> out_faces;
while (cc.pull(cv::gout(out_faces))) {
// Process results
...
}
#+END_SRC
#+LaTeX: }
**** More information
#+LaTeX: {\footnotesize
https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/
#+LaTeX: }
* Latest features
** Latest features
*** Python API
- Initial Python3 binding is available now in ~master~ (future 4.5);
- Only basic CV functionality is supported (~core~ & ~imgproc~ namespaces,
selecting backends);
- Adding more programmability, inference, and streaming is next.
** Latest features
*** Python API
#+LaTeX: {\footnotesize
#+BEGIN_SRC Python
import numpy as np
import cv2 as cv
sz = (1280, 720)
in1 = np.random.randint(0, 100, sz).astype(np.uint8)
in2 = np.random.randint(0, 100, sz).astype(np.uint8)
g_in1 = cv.GMat()
g_in2 = cv.GMat()
g_out = cv.gapi.add(g_in1, g_in2)
gr = cv.GComputation(g_in1, g_in2, g_out)
pkg = cv.gapi.core.fluid.kernels()
out = gr.apply(in1, in2, args=cv.compile_args(pkg))
#+END_SRC
#+LaTeX: }
* Understanding the "G-Effect"
** Understanding the "G-Effect"
@ -384,15 +940,22 @@ speed-up on QVGA taken as 1.0).
* Resources on G-API
** Resources on G-API
:PROPERTIES:
:BEAMER_opt: shrink
:END:
*** Repository
- https://github.com/opencv/opencv (see ~modules/gapi~)
- Integral part of OpenCV starting version 4.0;
*** Article
- https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/
*** Documentation
- https://docs.opencv.org/master/d0/d1e/gapi.html
- A tutorial and a class reference are there as well.
- https://docs.opencv.org/4.4.0/d0/d1e/gapi.html
*** Tutorials
- https://docs.opencv.org/4.4.0/df/d7e/tutorial_table_of_content_gapi.html
* Thank you!

View File

@ -24,10 +24,18 @@
#include <opencv2/gapi/gmat.hpp>
#include <opencv2/gapi/garray.hpp>
#include <opencv2/gapi/gscalar.hpp>
#include <opencv2/gapi/gopaque.hpp>
#include <opencv2/gapi/gframe.hpp>
#include <opencv2/gapi/gcomputation.hpp>
#include <opencv2/gapi/gcompiled.hpp>
#include <opencv2/gapi/gtyped.hpp>
#include <opencv2/gapi/gkernel.hpp>
#include <opencv2/gapi/operators.hpp>
// Include these files here to avoid cyclic dependency between
// Desync & GKernel & GComputation & GStreamingCompiled.
#include <opencv2/gapi/streaming/desync.hpp>
#include <opencv2/gapi/streaming/format.hpp>
#endif // OPENCV_GAPI_HPP

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#ifndef OPENCV_GAPI_CORE_HPP
@ -17,6 +17,7 @@
#include <opencv2/gapi/gmat.hpp>
#include <opencv2/gapi/gscalar.hpp>
#include <opencv2/gapi/gkernel.hpp>
#include <opencv2/gapi/streaming/format.hpp>
/** \defgroup gapi_core G-API Core functionality
@{
@ -26,6 +27,7 @@
@defgroup gapi_transform Graph API: Image and channel composition functions
@}
*/
namespace cv { namespace gapi {
namespace core {
using GMat2 = std::tuple<GMat,GMat>;
@ -308,6 +310,13 @@ namespace core {
}
};
// Graph operation: count of non-zero elements of a GMat, produced as GOpaque<int>.
// outMeta only validates input layout; the int output carries no shape metadata.
G_TYPED_KERNEL(GCountNonZero, <GOpaque<int>(GMat)>, "org.opencv.core.matrixop.countNonZero") {
static GOpaqueDesc outMeta(GMatDesc in) {
GAPI_Assert(in.chan == 1); // operation is defined for single-channel input only
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GAddW, <GMat(GMat, double, GMat, double, double, int)>, "org.opencv.core.matrixop.addweighted") {
static GMatDesc outMeta(GMatDesc a, double, GMatDesc b, double, double, int ddepth) {
if (ddepth == -1)
@ -443,12 +452,6 @@ namespace core {
}
};
G_TYPED_KERNEL(GCopy, <GMat(GMat)>, "org.opencv.core.transform.copy") {
static GMatDesc outMeta(GMatDesc in) {
return in;
}
};
G_TYPED_KERNEL(GConcatHor, <GMat(GMat, GMat)>, "org.opencv.imgproc.transform.concatHor") {
static GMatDesc outMeta(GMatDesc l, GMatDesc r) {
return l.withSizeDelta(+r.size.width, 0);
@ -502,18 +505,93 @@ namespace core {
}
};
G_TYPED_KERNEL(GSize, <GOpaque<Size>(GMat)>, "org.opencv.core.size") {
static GOpaqueDesc outMeta(const GMatDesc&) {
return empty_gopaque_desc();
// Graph operation: N-dimensional kmeans over a GMat of samples, with user-supplied
// initial labels (bestLabels). Outputs: a GOpaque<double> scalar (presumably the
// kmeans compactness score -- TODO confirm against the kernel implementation),
// the per-sample labels and the cluster centers.
G_TYPED_KERNEL(
GKMeansND,
<std::tuple<GOpaque<double>,GMat,GMat>(GMat,int,GMat,TermCriteria,int,KmeansFlags)>,
"org.opencv.core.kmeansND") {
static std::tuple<GOpaqueDesc,GMatDesc,GMatDesc>
outMeta(const GMatDesc& in, int K, const GMatDesc& bestLabels, const TermCriteria&, int,
KmeansFlags flags) {
GAPI_Assert(in.depth == CV_32F); // kmeans input data must be 32-bit float
// checkVector() yields {sample amount, sample dimensionality} for a
// vector-shaped Mat, and -1 when the input is a general 2D Mat:
std::vector<int> amount_n_dim = detail::checkVector(in);
int amount = amount_n_dim[0], dim = amount_n_dim[1];
if (amount == -1) // Mat with height != 1, width != 1, channels != 1 given
{ // which means that kmeans will consider the following:
amount = in.size.height;
dim = in.size.width * in.chan;
}
// kmeans sets these labels' sizes when no bestLabels given:
GMatDesc out_labels(CV_32S, 1, Size{1, amount});
// kmeans always sets these centers' sizes:
GMatDesc centers (CV_32F, 1, Size{dim, K});
if (flags & KMEANS_USE_INITIAL_LABELS)
{
GAPI_Assert(bestLabels.depth == CV_32S); // labels are integral indices
int labels_amount = detail::checkVector(bestLabels, 1u);
GAPI_Assert(labels_amount == amount); // one initial label per input sample
out_labels = bestLabels; // kmeans preserves bestLabels' sizes if given
}
return std::make_tuple(empty_gopaque_desc(), out_labels, centers);
}
};
G_TYPED_KERNEL(GSizeR, <GOpaque<Size>(GOpaque<Rect>)>, "org.opencv.core.sizeR") {
static GOpaqueDesc outMeta(const GOpaqueDesc&) {
return empty_gopaque_desc();
// Graph operation: N-dimensional kmeans without user-supplied initial labels.
// Same outputs as GKMeansND; KMEANS_USE_INITIAL_LABELS is rejected because this
// overload has no bestLabels argument to take the initial labels from.
G_TYPED_KERNEL(
GKMeansNDNoInit,
<std::tuple<GOpaque<double>,GMat,GMat>(GMat,int,TermCriteria,int,KmeansFlags)>,
"org.opencv.core.kmeansNDNoInit") {
static std::tuple<GOpaqueDesc,GMatDesc,GMatDesc>
outMeta(const GMatDesc& in, int K, const TermCriteria&, int, KmeansFlags flags) {
GAPI_Assert( !(flags & KMEANS_USE_INITIAL_LABELS) ); // no labels to initialize from
GAPI_Assert(in.depth == CV_32F); // kmeans input data must be 32-bit float
// checkVector() yields {sample amount, sample dimensionality}, or -1 for a
// general 2D Mat:
std::vector<int> amount_n_dim = detail::checkVector(in);
int amount = amount_n_dim[0], dim = amount_n_dim[1];
if (amount == -1) // Mat with height != 1, width != 1, channels != 1 given
{ // which means that kmeans will consider the following:
amount = in.size.height;
dim = in.size.width * in.chan;
}
// kmeans itself defines the sizes of both outputs:
GMatDesc out_labels(CV_32S, 1, Size{1, amount});
GMatDesc centers (CV_32F, 1, Size{dim, K});
return std::make_tuple(empty_gopaque_desc(), out_labels, centers);
}
};
}
// Graph operation: kmeans over a GArray of 2D points (Point2f).
// GArray/GOpaque descriptors carry no size information, so outMeta ignores all
// arguments and returns empty descriptors.
G_TYPED_KERNEL(GKMeans2D, <std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
(GArray<Point2f>,int,GArray<int>,TermCriteria,int,KmeansFlags)>,
"org.opencv.core.kmeans2D") {
static std::tuple<GOpaqueDesc,GArrayDesc,GArrayDesc>
outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) {
return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc());
}
};
// Graph operation: kmeans over a GArray of 3D points (Point3f).
// Mirrors GKMeans2D: array/opaque descriptors are size-less, so outMeta simply
// returns empty descriptors for all three outputs.
G_TYPED_KERNEL(GKMeans3D, <std::tuple<GOpaque<double>,GArray<int>,GArray<Point3f>>
(GArray<Point3f>,int,GArray<int>,TermCriteria,int,KmeansFlags)>,
"org.opencv.core.kmeans3D") {
static std::tuple<GOpaqueDesc,GArrayDesc,GArrayDesc>
outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) {
return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc());
}
};
} // namespace core
namespace streaming {
// Operations for Streaming (declared in this header for convenience)
// GSize: the Size of a GMat frame, as a GOpaque<Size>. GOpaque descriptors are
// size-less, so outMeta returns an empty descriptor regardless of the input.
G_TYPED_KERNEL(GSize, <GOpaque<Size>(GMat)>, "org.opencv.streaming.size") {
static GOpaqueDesc outMeta(const GMatDesc&) {
return empty_gopaque_desc();
}
};
// GSizeR: the Size of a GOpaque<Rect> (i.e. the rectangle's dimensions).
G_TYPED_KERNEL(GSizeR, <GOpaque<Size>(GOpaque<Rect>)>, "org.opencv.streaming.sizeR") {
static GOpaqueDesc outMeta(const GOpaqueDesc&) {
return empty_gopaque_desc();
}
};
} // namespace streaming
//! @addtogroup gapi_math
//! @{
@ -755,6 +833,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
@note Function textual ID is "org.opencv.core.math.mean"
@param src input matrix.
@sa countNonZero, min, max
*/
GAPI_EXPORTS_W GScalar mean(const GMat& src);
@ -856,7 +935,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGT"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpLE, cmpGE, cmpLS
@sa min, max, threshold, cmpLE, cmpGE, cmpLT
*/
GAPI_EXPORTS GMat cmpGT(const GMat& src1, const GMat& src2);
/** @overload
@ -908,7 +987,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1,
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGE"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpLE, cmpGT, cmpLS
@sa min, max, threshold, cmpLE, cmpGT, cmpLT
*/
GAPI_EXPORTS GMat cmpGE(const GMat& src1, const GMat& src2);
/** @overload
@ -934,7 +1013,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1,
@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLE"
@param src1 first input matrix.
@param src2 second input matrix/scalar of the same depth as first input matrix.
@sa min, max, threshold, cmpGT, cmpGE, cmpLS
@sa min, max, threshold, cmpGT, cmpGE, cmpLT
*/
GAPI_EXPORTS GMat cmpLE(const GMat& src1, const GMat& src2);
/** @overload
@ -1012,7 +1091,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
*/
GAPI_EXPORTS GMat bitwise_and(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_andS"
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_andS"
@param src1 first input matrix.
@param src2 scalar, which will be per-element conjuncted with elements of src1.
*/
@ -1036,7 +1115,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
*/
GAPI_EXPORTS GMat bitwise_or(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_orS"
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_orS"
@param src1 first input matrix.
@param src2 scalar, which will be per-element disjuncted with elements of src1.
*/
@ -1061,7 +1140,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
*/
GAPI_EXPORTS GMat bitwise_xor(const GMat& src1, const GMat& src2);
/** @overload
@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_xorS"
@note Function textual ID is "org.opencv.core.pixelwise.bitwise_xorS"
@param src1 first input matrix.
@param src2 scalar, with which a per-element "exclusive or" operation on elements of src1 will be performed.
*/
@ -1069,6 +1148,7 @@ GAPI_EXPORTS GMat bitwise_xor(const GMat& src1, const GScalar& src2);
/** @brief Inverts every bit of an array.
The function bitwise_not calculates per-element bit-wise inversion of the input
matrix:
\f[\texttt{dst} (I) = \neg \texttt{src} (I)\f]
@ -1121,7 +1201,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1,
@note Function textual ID is "org.opencv.core.matrixop.min"
@param src1 first input matrix.
@param src2 second input matrix of the same size and depth as src1.
@sa max, compareEqual, compareLess, compareLessEqual
@sa max, cmpEQ, cmpLT, cmpLE
*/
GAPI_EXPORTS GMat min(const GMat& src1, const GMat& src2);
@ -1138,7 +1218,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
@note Function textual ID is "org.opencv.core.matrixop.max"
@param src1 first input matrix.
@param src2 second input matrix of the same size and depth as src1.
@sa min, compare, compareEqual, compareGreater, compareGreaterEqual
@sa min, compare, cmpEQ, cmpGT, cmpGE
*/
GAPI_EXPORTS GMat max(const GMat& src1, const GMat& src2);
@ -1184,10 +1264,23 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
@note Function textual ID is "org.opencv.core.matrixop.sum"
@param src input matrix.
@sa min, max
@sa countNonZero, mean, min, max
*/
GAPI_EXPORTS GScalar sum(const GMat& src);
/** @brief Counts non-zero array elements.
The function returns the number of non-zero elements in src :
\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f]
Supported matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
@note Function textual ID is "org.opencv.core.matrixop.countNonZero"
@param src input single-channel matrix.
@sa mean, min, max
*/
GAPI_EXPORTS GOpaque<int> countNonZero(const GMat& src);
/** @brief Calculates the weighted sum of two matrices.
The function addWeighted calculates the weighted sum of two matrices as follows:
@ -1324,14 +1417,14 @@ Output matrix must be of the same size and depth as src.
types.
@param type thresholding type (see the cv::ThresholdTypes).
@sa min, max, cmpGT, cmpLE, cmpGE, cmpLS
@sa min, max, cmpGT, cmpLE, cmpGE, cmpLT
*/
GAPI_EXPORTS GMat threshold(const GMat& src, const GScalar& thresh, const GScalar& maxval, int type);
/** @overload
This function applicable for all threshold types except CV_THRESH_OTSU and CV_THRESH_TRIANGLE
@note Function textual ID is "org.opencv.core.matrixop.thresholdOT"
*/
GAPI_EXPORTS std::tuple<GMat, GScalar> threshold(const GMat& src, const GScalar& maxval, int type);
GAPI_EXPORTS_W std::tuple<GMat, GScalar> threshold(const GMat& src, const GScalar& maxval, int type);
/** @brief Applies a range-level threshold to each matrix element.
@ -1411,41 +1504,77 @@ Output image size will have the size dsize, the depth of output is the same as o
*/
GAPI_EXPORTS GMatP resizeP(const GMatP& src, const Size& dsize, int interpolation = cv::INTER_LINEAR);
/** @brief Creates one 3-channel (4-channel) matrix out of 3(4) single-channel ones.
/** @brief Creates one 4-channel matrix out of 4 single-channel ones.
The function merges several matrices to make a single multi-channel matrix. That is, each
element of the output matrix will be a concatenation of the elements of the input matrices, where
elements of i-th input matrix are treated as mv[i].channels()-element vectors.
Input matrix must be of @ref CV_8UC3 (@ref CV_8UC4) type.
Output matrix must be of @ref CV_8UC4 type.
The function split3/split4 does the reverse operation.
The function split4 does the reverse operation.
@note Function textual ID for merge3 is "org.opencv.core.transform.merge3"
@note Function textual ID for merge4 is "org.opencv.core.transform.merge4"
@note
- Function textual ID is "org.opencv.core.transform.merge4"
@param src1 first input matrix to be merged
@param src2 second input matrix to be merged
@param src3 third input matrix to be merged
@param src4 fourth input matrix to be merged
@sa split4, split3
@param src1 first input @ref CV_8UC1 matrix to be merged.
@param src2 second input @ref CV_8UC1 matrix to be merged.
@param src3 third input @ref CV_8UC1 matrix to be merged.
@param src4 fourth input @ref CV_8UC1 matrix to be merged.
@sa merge3, split4, split3
*/
GAPI_EXPORTS GMat merge4(const GMat& src1, const GMat& src2, const GMat& src3, const GMat& src4);
/** @brief Creates one 3-channel matrix out of 3 single-channel ones.
The function merges several matrices to make a single multi-channel matrix. That is, each
element of the output matrix will be a concatenation of the elements of the input matrices, where
elements of i-th input matrix are treated as mv[i].channels()-element vectors.
Output matrix must be of @ref CV_8UC3 type.
The function split3 does the reverse operation.
@note
- Function textual ID is "org.opencv.core.transform.merge3"
@param src1 first input @ref CV_8UC1 matrix to be merged.
@param src2 second input @ref CV_8UC1 matrix to be merged.
@param src3 third input @ref CV_8UC1 matrix to be merged.
@sa merge4, split4, split3
*/
GAPI_EXPORTS GMat merge3(const GMat& src1, const GMat& src2, const GMat& src3);
/** @brief Divides a 3-channel (4-channel) matrix into 3(4) single-channel matrices.
/** @brief Divides a 4-channel matrix into 4 single-channel matrices.
The function splits a 3-channel (4-channel) matrix into 3(4) single-channel matrices:
The function splits a 4-channel matrix into 4 single-channel matrices:
\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f]
All output matrices must be in @ref CV_8UC1.
All output matrices must be of @ref CV_8UC1 type.
@note Function textual for split3 ID is "org.opencv.core.transform.split3"
@note Function textual for split4 ID is "org.opencv.core.transform.split4"
The function merge4 does the reverse operation.
@param src input @ref CV_8UC4 (@ref CV_8UC3) matrix.
@sa merge3, merge4
@note
- Function textual ID is "org.opencv.core.transform.split4"
@param src input @ref CV_8UC4 matrix.
@sa split3, merge3, merge4
*/
GAPI_EXPORTS std::tuple<GMat, GMat, GMat,GMat> split4(const GMat& src);
/** @brief Divides a 3-channel matrix into 3 single-channel matrices.
The function splits a 3-channel matrix into 3 single-channel matrices:
\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f]
All output matrices must be of @ref CV_8UC1 type.
The function merge3 does the reverse operation.
@note
- Function textual ID is "org.opencv.core.transform.split3"
@param src input @ref CV_8UC3 matrix.
@sa split4, merge3, merge4
*/
GAPI_EXPORTS_W std::tuple<GMat, GMat, GMat> split3(const GMat& src);
/** @brief Applies a generic geometrical transformation to an image.
@ -1463,21 +1592,21 @@ convert from floating to fixed-point representations of a map is that they can y
cvFloor(y)) and \f$map_2\f$ contains indices in a table of interpolation coefficients.
Output image must be of the same size and depth as input one.
@note Function textual ID is "org.opencv.core.transform.remap"
@note
- Function textual ID is "org.opencv.core.transform.remap"
- Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
@param src Source image.
@param map1 The first map of either (x,y) points or just x values having the type CV_16SC2,
CV_32FC1, or CV_32FC2.
@param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
if map1 is (x,y) points), respectively.
@param interpolation Interpolation method (see cv::InterpolationFlags). The method INTER_AREA is
not supported by this function.
@param interpolation Interpolation method (see cv::InterpolationFlags). The methods #INTER_AREA
and #INTER_LINEAR_EXACT are not supported by this function.
@param borderMode Pixel extrapolation method (see cv::BorderTypes). When
borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that
corresponds to the "outliers" in the source image are not modified by the function.
@param borderValue Value used in case of a constant border. By default, it is 0.
@note
Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
*/
GAPI_EXPORTS GMat remap(const GMat& src, const Mat& map1, const Mat& map2,
int interpolation, int borderMode = BORDER_CONSTANT,
@ -1534,19 +1663,6 @@ Output matrix must be of the same depth as input one, size is specified by given
*/
GAPI_EXPORTS GMat crop(const GMat& src, const Rect& rect);
/** @brief Copies a matrix.
Copies an input array. Works as a regular Mat::clone but happens in-graph.
Mainly is used to workaround some existing limitations (e.g. to forward an input frame to outputs
in the streaming mode). Will be deprecated and removed in the future.
@note Function textual ID is "org.opencv.core.transform.copy"
@param src input matrix.
@sa crop
*/
GAPI_EXPORTS GMat copy(const GMat& src);
/** @brief Applies horizontal concatenation to given matrices.
The function horizontally concatenates two GMat matrices (with the same number of rows).
@ -1732,9 +1848,83 @@ GAPI_EXPORTS GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, i
int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar());
//! @} gapi_transform
/** @brief Finds centers of clusters and groups input samples around the clusters.
The function kmeans implements a k-means algorithm that finds the centers of K clusters
and groups the input samples around the clusters. As an output, \f$\texttt{bestLabels}_i\f$
contains a 0-based cluster index for the \f$i^{th}\f$ sample.
@note
- Function textual ID is "org.opencv.core.kmeansND"
- In case of an N-dimensional points' set given, input GMat can have the following traits:
2 dimensions, a single row or column if there are N channels,
or N columns if there is a single channel. Mat should have @ref CV_32F depth.
- Although, if GMat with height != 1, width != 1, channels != 1 given as data, n-dimensional
samples are considered given in amount of A, where A = height, n = width * channels.
- In case of GMat given as data:
- the output labels are returned as 1-channel GMat with sizes
width = 1, height = A, where A is samples amount, or width = bestLabels.width,
height = bestLabels.height if bestLabels given;
- the cluster centers are returned as 1-channel GMat with sizes
width = n, height = K, where n is samples' dimensionality and K is clusters' amount.
- As one of possible usages, if you want to control the initial labels for each attempt
by yourself, you can utilize just the core of the function. To do that, set the number
of attempts to 1, initialize labels each time using a custom algorithm, pass them with the
( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best (most-compact) clustering.
@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.
Function can take GArray<Point2f>, GArray<Point3f> for 2D and 3D cases or GMat for any
dimensionality and channels.
@param K Number of clusters to split the set by.
@param bestLabels Optional input integer array that can store the supposed initial cluster indices
for every sample. Used when ( flags = #KMEANS_USE_INITIAL_LABELS ) flag is set.
@param criteria The algorithm termination criteria, that is, the maximum number of iterations
and/or the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of
the cluster centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
@param attempts Flag to specify the number of times the algorithm is executed using different
initial labellings. The algorithm returns the labels that yield the best compactness (see the first
function return value).
@param flags Flag that can take values of cv::KmeansFlags .
@return
- Compactness measure that is computed as
\f[\sum _i \| \texttt{samples} _i - \texttt{centers} _{ \texttt{labels} _i} \| ^2\f]
after every attempt. The best (minimum) value is chosen and the corresponding labels and the
compactness value are returned by the function.
- Integer array that stores the cluster indices for every sample.
- Array of the cluster centers.
*/
GAPI_EXPORTS std::tuple<GOpaque<double>,GMat,GMat>
kmeans(const GMat& data, const int K, const GMat& bestLabels,
const TermCriteria& criteria, const int attempts, const KmeansFlags flags);
/** @overload
@note
- Function textual ID is "org.opencv.core.kmeansNDNoInit"
- #KMEANS_USE_INITIAL_LABELS flag must not be set while using this overload.
*/
GAPI_EXPORTS std::tuple<GOpaque<double>,GMat,GMat>
kmeans(const GMat& data, const int K, const TermCriteria& criteria, const int attempts,
const KmeansFlags flags);
/** @overload
@note Function textual ID is "org.opencv.core.kmeans2D"
*/
GAPI_EXPORTS std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
kmeans(const GArray<Point2f>& data, const int K, const GArray<int>& bestLabels,
const TermCriteria& criteria, const int attempts, const KmeansFlags flags);
/** @overload
@note Function textual ID is "org.opencv.core.kmeans3D"
*/
GAPI_EXPORTS std::tuple<GOpaque<double>,GArray<int>,GArray<Point3f>>
kmeans(const GArray<Point3f>& data, const int K, const GArray<int>& bestLabels,
const TermCriteria& criteria, const int attempts, const KmeansFlags flags);
namespace streaming {
/** @brief Gets dimensions from Mat.
@note Function textual ID is "org.opencv.core.size"
@note Function textual ID is "org.opencv.streaming.size"
@param src Input tensor
@return Size (tensor dimensions).
@ -1744,12 +1934,13 @@ GAPI_EXPORTS GOpaque<Size> size(const GMat& src);
/** @overload
Gets dimensions from rectangle.
@note Function textual ID is "org.opencv.core.sizeR"
@note Function textual ID is "org.opencv.streaming.sizeR"
@param r Input rectangle.
@return Size (rectangle dimensions).
*/
GAPI_EXPORTS GOpaque<Size> size(const GOpaque<Rect>& r);
} //namespace streaming
} //namespace gapi
} //namespace cv

View File

@ -101,6 +101,7 @@ public:
const cv::Scalar& inVal(int input);
cv::Scalar& outValR(int output); // FIXME: Avoid cv::Scalar s = ctx.outValR()
cv::MediaFrame& outFrame(int output);
template<typename T> std::vector<T>& outVecR(int output) // FIXME: the same issue
{
return outVecRef(output).wref<T>();
@ -164,7 +165,7 @@ template<> struct get_in<cv::GMatP>
};
template<> struct get_in<cv::GFrame>
{
static cv::Mat get(GCPUContext &ctx, int idx) { return get_in<cv::GMat>::get(ctx, idx); }
static cv::MediaFrame get(GCPUContext &ctx, int idx) { return ctx.inArg<cv::MediaFrame>(idx); }
};
template<> struct get_in<cv::GScalar>
{
@ -258,6 +259,13 @@ template<> struct get_out<cv::GScalar>
return ctx.outValR(idx);
}
};
// Adapter: fetches the output slot of a CPU kernel as a writable MediaFrame
// (GFrame outputs are stored in the context's frame pool, see outFrame()).
template<> struct get_out<cv::GFrame>
{
static cv::MediaFrame& get(GCPUContext &ctx, int idx)
{
return ctx.outFrame(idx);
}
};
template<typename U> struct get_out<cv::GArray<U>>
{
static std::vector<U>& get(GCPUContext &ctx, int idx)
@ -271,6 +279,11 @@ template<> struct get_out<cv::GArray<cv::GMat> >: public get_out<cv::GArray<cv::
{
};
// FIXME(dm): GArray<vector<U>>/GArray<GArray<U>> conversion should be done more gracefully in the system
// A nested GArray<GArray<U>> output is handled exactly like GArray<std::vector<U>>:
// reuse that adapter via inheritance instead of duplicating the accessor.
template<typename U> struct get_out<cv::GArray<cv::GArray<U>> >: public get_out<cv::GArray<std::vector<U>> >
{
};
template<typename U> struct get_out<cv::GOpaque<U>>
{
static U& get(GCPUContext &ctx, int idx)
@ -443,7 +456,7 @@ struct OCVStCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...>> :
template<class Impl, class K>
class GCPUKernelImpl: public cv::detail::KernelTag
{
using CallHelper = detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;
using CallHelper = cv::detail::OCVCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;
public:
using API = K;
@ -497,7 +510,7 @@ private:
template<typename K, typename Callable>
gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c)
{
using P = detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
using P = cv::detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
return GOCVFunctor{ K::id()
, &K::getOutMeta
, std::bind(&P::callFunctor, std::placeholders::_1, std::ref(c))
@ -507,7 +520,7 @@ gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c)
template<typename K, typename Callable>
gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(const Callable& c)
{
using P = detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
using P = cv::detail::OCVCallHelper<Callable, typename K::InArgs, typename K::OutArgs>;
return GOCVFunctor{ K::id()
, &K::getOutMeta
, std::bind(&P::callFunctor, std::placeholders::_1, c)

View File

@ -9,11 +9,14 @@
#define OPENCV_GAPI_GARG_HPP
#include <vector>
#include <unordered_map>
#include <type_traits>
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/own/mat.hpp>
#include <opencv2/gapi/media.hpp>
#include <opencv2/gapi/util/util.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/util/variant.hpp>
@ -21,9 +24,11 @@
#include <opencv2/gapi/gscalar.hpp>
#include <opencv2/gapi/garray.hpp>
#include <opencv2/gapi/gopaque.hpp>
#include <opencv2/gapi/gframe.hpp>
#include <opencv2/gapi/gtype_traits.hpp>
#include <opencv2/gapi/gmetaarg.hpp>
#include <opencv2/gapi/streaming/source.hpp>
#include <opencv2/gapi/rmat.hpp>
namespace cv {
@ -90,16 +95,73 @@ using GArgs = std::vector<GArg>;
// FIXME: Express as M<GProtoArg...>::type
// FIXME: Move to a separate file!
using GRunArg = util::variant<
using GRunArgBase = util::variant<
#if !defined(GAPI_STANDALONE)
cv::UMat,
#endif // !defined(GAPI_STANDALONE)
cv::RMat,
cv::gapi::wip::IStreamSource::Ptr,
cv::Mat,
cv::Scalar,
cv::detail::VectorRef,
cv::detail::OpaqueRef
cv::detail::OpaqueRef,
cv::MediaFrame
>;
namespace detail {
// Compile-time predicate: true iff T is one of the alternative types of the
// given util::variant<...> (used below to constrain GRunArg's templated
// constructors/assignment to variant-compatible payloads only).
template<typename,typename>
struct in_variant;
template<typename T, typename... Types>
struct in_variant<T, util::variant<Types...> >
: std::integral_constant<bool, cv::detail::contains<T, Types...>::value > {
};
} // namespace detail
// A run-time argument: the variant payload (GRunArgBase) plus an attached
// free-form metadata dictionary (e.g. timestamps/seq-ids in streaming mode).
struct GAPI_EXPORTS GRunArg: public GRunArgBase
{
// Metadata information here
using Meta = std::unordered_map<std::string, util::any>;
Meta meta;

// Mimic the old GRunArg semantics here, from the times when
// GRunArg was a plain alias to variant<>
GRunArg();
GRunArg(const cv::GRunArg &arg);
GRunArg(cv::GRunArg &&arg);
GRunArg& operator= (const GRunArg &arg);
GRunArg& operator= (GRunArg &&arg);

// Copy-construct from any payload type the underlying variant can hold.
// SFINAE (in_variant) keeps these constructors out of overload resolution
// for unrelated types, preserving the old implicit-conversion behavior.
template <typename T>
GRunArg(const T &t,
const Meta &m = Meta{},
typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, int>::type = 0)
: GRunArgBase(t)
, meta(m)
{
}
// Move-construct from any payload type the underlying variant can hold.
template <typename T>
GRunArg(T &&t,
const Meta &m = Meta{},
typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, int>::type = 0)
: GRunArgBase(std::move(t))
, meta(m)
{
}
// Assign a payload value directly; note: meta is left untouched here.
template <typename T> auto operator= (const T &t)
-> typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, cv::GRunArg>::type&
{
GRunArgBase::operator=(t);
return *this;
}
// Move-assign a payload value directly; meta is left untouched here.
template <typename T> auto operator= (T&& t)
-> typename std::enable_if< detail::in_variant<T, GRunArgBase>::value, cv::GRunArg>::type&
{
GRunArgBase::operator=(std::move(t));
return *this;
}
};
using GRunArgs = std::vector<GRunArg>;
// TODO: Think about the addition operator
@ -124,11 +186,13 @@ namespace gapi
namespace wip
{
/**
* @brief This aggregate type represents all types which G-API can handle (via variant).
* @brief This aggregate type represents all types which G-API can
* handle (via variant).
*
* It only exists to overcome C++ language limitations (where a `using`-defined class can't be forward-declared).
* It only exists to overcome C++ language limitations (where a
* `using`-defined class can't be forward-declared).
*/
struct Data: public GRunArg
struct GAPI_EXPORTS Data: public GRunArg
{
using GRunArg::GRunArg;
template <typename T>
@ -144,7 +208,9 @@ using GRunArgP = util::variant<
cv::UMat*,
#endif // !defined(GAPI_STANDALONE)
cv::Mat*,
cv::RMat*,
cv::Scalar*,
cv::MediaFrame*,
cv::detail::VectorRef,
cv::detail::OpaqueRef
>;

View File

@ -284,6 +284,14 @@ namespace detail
return static_cast<VectorRefT<T>&>(*m_ref).rref();
}
// Check if this reference was created for/from std::vector<T>.
// Uses RTTI on the type-erased holder; T is decayed first so that
// holds<const T&>() and holds<T>() answer the same question.
// Returns false for an empty (never-reset) reference.
template <typename T> bool holds() const
{
if (!m_ref) return false;
using U = typename std::decay<T>::type;
return dynamic_cast<VectorRefT<U>*>(m_ref.get()) != nullptr;
}
void mov(VectorRef &v)
{
m_ref->mov(*v.m_ref);
@ -341,15 +349,18 @@ public:
explicit GArray(detail::GArrayU &&ref) // GArrayU-based constructor
: m_ref(ref) { putDetails(); } // (used by GCall, not for users)
detail::GArrayU strip() const { return m_ref; }
/// @private
detail::GArrayU strip() const {
return m_ref;
}
/// @private
static void VCtor(detail::VectorRef& vref) {
vref.reset<HT>();
}
private:
static void VCTor(detail::VectorRef& vref) {
vref.reset<HT>();
vref.storeKind<HT>();
}
void putDetails() {
m_ref.setConstructFcn(&VCTor);
m_ref.setConstructFcn(&VCtor);
m_ref.specifyType<HT>(); // FIXME: to unify those 2 to avoid excessive dynamic_cast
m_ref.storeKind<HT>(); //
}
@ -357,6 +368,8 @@ private:
detail::GArrayU m_ref;
};
using GArrayP2f = GArray<cv::Point2f>;
/** @} */
} // namespace cv

View File

@ -11,6 +11,7 @@
#include <opencv2/gapi/garg.hpp> // GArg
#include <opencv2/gapi/gmat.hpp> // GMat
#include <opencv2/gapi/gscalar.hpp> // GScalar
#include <opencv2/gapi/gframe.hpp> // GFrame
#include <opencv2/gapi/garray.hpp> // GArray<T>
#include <opencv2/gapi/gopaque.hpp> // GOpaque<T>
@ -41,6 +42,7 @@ public:
GMat yield (int output = 0);
GMatP yieldP (int output = 0);
GScalar yieldScalar(int output = 0);
GFrame yieldFrame (int output = 0);
template<class T> GArray<T> yieldArray(int output = 0)
{
@ -56,11 +58,16 @@ public:
Priv& priv();
const Priv& priv() const;
protected:
std::shared_ptr<Priv> m_priv;
// GKernel and params can be modified, it's needed for infer<Generic>,
// because information about output shapes doesn't exist in compile time
GKernel& kernel();
cv::util::any& params();
void setArgs(std::vector<GArg> &&args);
protected:
std::shared_ptr<Priv> m_priv;
// Public versions return a typed array or opaque, those are implementation details
detail::GArrayU yieldArray(int output = 0);
detail::GOpaqueU yieldOpaque(int output = 0);

View File

@ -19,6 +19,7 @@
#include <opencv2/gapi/own/exports.hpp>
#include <opencv2/gapi/own/assert.hpp>
#include <opencv2/gapi/render/render_types.hpp>
#include <opencv2/gapi/s11n/base.hpp>
namespace cv {
@ -44,12 +45,16 @@ namespace detail
CV_BOOL, // bool user G-API data
CV_INT, // int user G-API data
CV_DOUBLE, // double user G-API data
CV_FLOAT, // float user G-API data
CV_UINT64, // uint64_t user G-API data
CV_STRING, // std::string user G-API data
CV_POINT, // cv::Point user G-API data
CV_POINT2F, // cv::Point2f user G-API data
CV_SIZE, // cv::Size user G-API data
CV_RECT, // cv::Rect user G-API data
CV_SCALAR, // cv::Scalar user G-API data
CV_MAT, // cv::Mat user G-API data
CV_PRIM, // cv::gapi::wip::draw::Prim user G-API data
CV_DRAW_PRIM, // cv::gapi::wip::draw::Prim user G-API data
};
// Type traits helper which simplifies the extraction of kind from type
@ -57,19 +62,24 @@ namespace detail
template<typename T> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_UNKNOWN; };
template<> struct GOpaqueTraits<int> { static constexpr const OpaqueKind kind = OpaqueKind::CV_INT; };
template<> struct GOpaqueTraits<double> { static constexpr const OpaqueKind kind = OpaqueKind::CV_DOUBLE; };
template<> struct GOpaqueTraits<cv::Size> { static constexpr const OpaqueKind kind = OpaqueKind::CV_SIZE; };
template<> struct GOpaqueTraits<float> { static constexpr const OpaqueKind kind = OpaqueKind::CV_FLOAT; };
template<> struct GOpaqueTraits<uint64_t> { static constexpr const OpaqueKind kind = OpaqueKind::CV_UINT64; };
template<> struct GOpaqueTraits<bool> { static constexpr const OpaqueKind kind = OpaqueKind::CV_BOOL; };
template<> struct GOpaqueTraits<std::string> { static constexpr const OpaqueKind kind = OpaqueKind::CV_STRING; };
template<> struct GOpaqueTraits<cv::Size> { static constexpr const OpaqueKind kind = OpaqueKind::CV_SIZE; };
template<> struct GOpaqueTraits<cv::Scalar> { static constexpr const OpaqueKind kind = OpaqueKind::CV_SCALAR; };
template<> struct GOpaqueTraits<cv::Point> { static constexpr const OpaqueKind kind = OpaqueKind::CV_POINT; };
template<> struct GOpaqueTraits<cv::Point2f> { static constexpr const OpaqueKind kind = OpaqueKind::CV_POINT2F; };
template<> struct GOpaqueTraits<cv::Mat> { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; };
template<> struct GOpaqueTraits<cv::Rect> { static constexpr const OpaqueKind kind = OpaqueKind::CV_RECT; };
template<> struct GOpaqueTraits<cv::GMat> { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; };
template<> struct GOpaqueTraits<cv::gapi::wip::draw::Prim>
{ static constexpr const OpaqueKind kind = OpaqueKind::CV_PRIM; };
// GArray does not support the bool type for now, due to the specialized std::vector<bool> implementation
using GOpaqueTraitsArrayTypes = std::tuple<int, double, cv::Size, cv::Scalar, cv::Point, cv::Mat, cv::Rect, cv::gapi::wip::draw::Prim>;
{ static constexpr const OpaqueKind kind = OpaqueKind::CV_DRAW_PRIM; };
using GOpaqueTraitsArrayTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Scalar, cv::Point, cv::Point2f,
cv::Mat, cv::Rect, cv::gapi::wip::draw::Prim>;
// GOpaque is not supporting cv::Mat and cv::Scalar since there are GScalar and GMat types
using GOpaqueTraitsOpaqueTypes = std::tuple<bool, int, double, cv::Size, cv::Point, cv::Rect, cv::gapi::wip::draw::Prim>;
using GOpaqueTraitsOpaqueTypes = std::tuple<int, double, float, uint64_t, bool, std::string, cv::Size, cv::Point, cv::Point2f, cv::Rect,
cv::gapi::wip::draw::Prim>;
} // namespace detail
// This definition is here because it is reused by both public(?) and internal
@ -87,6 +97,15 @@ enum class GShape: int
GFRAME,
};
namespace gapi {
namespace s11n {
namespace detail {
template<typename T> struct wrap_serialize;
} // namespace detail
} // namespace s11n
} // namespace gapi
struct GCompileArg;
namespace detail {
@ -132,7 +151,7 @@ namespace detail {
* passed in (a variadic template parameter pack) into a vector of
* cv::GCompileArg objects.
*/
struct GAPI_EXPORTS_W_SIMPLE GCompileArg
struct GCompileArg
{
public:
// NB: Required for pythnon bindings
@ -144,6 +163,9 @@ public:
template<typename T, typename std::enable_if<!detail::is_compile_arg<T>::value, int>::type = 0>
explicit GCompileArg(T &&t)
: tag(detail::CompileArgTag<typename std::decay<T>::type>::tag())
, serializeF(cv::gapi::s11n::detail::has_S11N_spec<T>::value ?
&cv::gapi::s11n::detail::wrap_serialize<T>::serialize :
nullptr)
, arg(t)
{
}
@ -158,7 +180,16 @@ public:
return util::any_cast<T>(arg);
}
void serialize(cv::gapi::s11n::IOStream& os) const
{
if (serializeF)
{
serializeF(os, *this);
}
}
private:
std::function<void(cv::gapi::s11n::IOStream&, const GCompileArg&)> serializeF;
util::any arg;
};
@ -173,12 +204,12 @@ template<typename... Ts> GCompileArgs compile_args(Ts&&... args)
return GCompileArgs{ GCompileArg(args)... };
}
namespace gapi
{
/**
* @brief Retrieves particular compilation argument by its type from
* cv::GCompileArgs
*/
namespace gapi
{
template<typename T>
inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
{
@ -191,6 +222,19 @@ inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
}
return cv::util::optional<T>();
}
namespace s11n {
namespace detail {
// Type-erasure shim stored in GCompileArg::serializeF: recovers the concrete
// compile-arg type T from the any-payload and forwards it to the user-provided
// S11N<T> specialization. Only instantiated when has_S11N_spec<T> holds.
template<typename T> struct wrap_serialize
{
static void serialize(IOStream& os, const GCompileArg& arg)
{
// Decay so that T deduced as a reference/const still maps to the
// S11N specialization registered for the plain value type.
using DT = typename std::decay<T>::type;
S11N<DT>::serialize(os, arg.get<DT>());
}
};
} // namespace detail
} // namespace s11n
} // namespace gapi
/**

View File

@ -37,14 +37,12 @@ namespace detail
}
// Forward-declare the serialization objects
namespace gimpl {
namespace gapi {
namespace s11n {
namespace I {
struct IStream;
struct OStream;
} // namespace I
struct IIStream;
struct IOStream;
} // namespace s11n
} // namespace gimpl
} // namespace gapi
/**
* \addtogroup gapi_main_classes
@ -259,6 +257,9 @@ public:
*/
void apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args = {}); // Arg-to-arg overload
/// @private -- Exclude this function from OpenCV documentation
GAPI_WRAP GRunArgs apply(GRunArgs &&ins, GCompileArgs &&args = {});
/// @private -- Exclude this function from OpenCV documentation
void apply(const std::vector<cv::Mat>& ins, // Compatibility overload
const std::vector<cv::Mat>& outs,
@ -286,7 +287,7 @@ public:
* @param args compilation arguments for underlying compilation
* process.
*/
GAPI_WRAP void apply(cv::Mat in, CV_OUT cv::Scalar &out, GCompileArgs &&args = {}); // Unary overload (scalar)
void apply(cv::Mat in, cv::Scalar &out, GCompileArgs &&args = {}); // Unary overload (scalar)
/**
* @brief Execute a binary computation (with compilation on the fly)
@ -298,7 +299,7 @@ public:
* @param args compilation arguments for underlying compilation
* process.
*/
GAPI_WRAP void apply(cv::Mat in1, cv::Mat in2, CV_OUT cv::Mat &out, GCompileArgs &&args = {}); // Binary overload
void apply(cv::Mat in1, cv::Mat in2, cv::Mat &out, GCompileArgs &&args = {}); // Binary overload
/**
* @brief Execute an binary computation (with compilation on the fly)
@ -435,7 +436,7 @@ public:
*
* @sa @ref gapi_compile_args
*/
GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
GAPI_WRAP GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
/**
* @brief Compile the computation for streaming mode.
@ -456,7 +457,7 @@ public:
*
* @sa @ref gapi_compile_args
*/
GStreamingCompiled compileStreaming(GCompileArgs &&args = {});
GAPI_WRAP GStreamingCompiled compileStreaming(GCompileArgs &&args = {});
// 2. Direct metadata version
/**
@ -506,9 +507,9 @@ public:
/// @private
const Priv& priv() const;
/// @private
explicit GComputation(cv::gimpl::s11n::I::IStream &);
explicit GComputation(cv::gapi::s11n::IIStream &);
/// @private
void serialize(cv::gimpl::s11n::I::OStream &) const;
void serialize(cv::gapi::s11n::IOStream &) const;
protected:
@ -528,6 +529,7 @@ protected:
GCompileArgs comp_args = std::get<sizeof...(Ts)-1>(meta_and_compile_args);
return compileStreaming(std::move(meta_args), std::move(comp_args));
}
void recompile(GMetaArgs&& in_metas, GCompileArgs &&args);
/// @private
std::shared_ptr<Priv> m_priv;
};

View File

@ -42,16 +42,29 @@ private:
};
/** @} */
// Pixel layout of a MediaFrame's underlying buffer(s).
enum class MediaFormat: int
{
BGR = 0, // single interleaved 8UC3 plane
NV12, // two planes: full-res Y, half-res interleaved UV
};
/**
* \addtogroup gapi_meta_args
* @{
*/
// Meta (descriptor) of a GFrame: media format plus frame dimensions.
// Plays the same role for GFrame that GMatDesc plays for GMat.
struct GAPI_EXPORTS GFrameDesc
{
MediaFormat fmt; // pixel layout of the frame data
cv::Size size; // frame dimensions (width x height)

bool operator== (const GFrameDesc &) const;
};
// A default-constructed (placeholder) frame descriptor.
static inline GFrameDesc empty_gframe_desc() { return GFrameDesc{}; }
/** @} */
class MediaFrame;
GAPI_EXPORTS GFrameDesc descr_of(const MediaFrame &frame);
GAPI_EXPORTS std::ostream& operator<<(std::ostream& os, const cv::GFrameDesc &desc);
} // namespace cv

View File

@ -26,8 +26,16 @@
namespace cv {
using GShapes = std::vector<GShape>;
using GKinds = std::vector<cv::detail::OpaqueKind>;
struct GTypeInfo
{
GShape shape;
cv::detail::OpaqueKind kind;
};
using GShapes = std::vector<GShape>;
using GKinds = std::vector<cv::detail::OpaqueKind>;
using GCtors = std::vector<detail::HostCtor>;
using GTypesInfo = std::vector<GTypeInfo>;
// GKernel describes kernel API to the system
// FIXME: add attributes of a kernel, (e.g. number and types
@ -41,6 +49,7 @@ struct GAPI_EXPORTS GKernel
M outMeta; // generic adaptor to API::outMeta(...)
GShapes outShapes; // types (shapes) kernel's outputs
GKinds inKinds; // kinds of kernel's inputs (fixme: below)
GCtors outCtors; // captured constructors for template output types
};
// TODO: It's questionable if inKinds should really be here. Instead,
// this information could come from meta.
@ -60,30 +69,31 @@ namespace detail
// yield() is used in graph construction time as a generic method to obtain
// lazy "return value" of G-API operations
//
namespace
template<typename T> struct Yield;
template<> struct Yield<cv::GMat>
{
template<typename T> struct Yield;
template<> struct Yield<cv::GMat>
{
static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); }
};
template<> struct Yield<cv::GMatP>
{
static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); }
};
template<> struct Yield<cv::GScalar>
{
static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); }
};
template<typename U> struct Yield<cv::GArray<U> >
{
static inline cv::GArray<U> yield(cv::GCall &call, int i) { return call.yieldArray<U>(i); }
};
template<typename U> struct Yield<cv::GOpaque<U> >
{
static inline cv::GOpaque<U> yield(cv::GCall &call, int i) { return call.yieldOpaque<U>(i); }
};
} // anonymous namespace
static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); }
};
template<> struct Yield<cv::GMatP>
{
static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); }
};
template<> struct Yield<cv::GScalar>
{
static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); }
};
template<typename U> struct Yield<cv::GArray<U> >
{
static inline cv::GArray<U> yield(cv::GCall &call, int i) { return call.yieldArray<U>(i); }
};
template<typename U> struct Yield<cv::GOpaque<U> >
{
static inline cv::GOpaque<U> yield(cv::GCall &call, int i) { return call.yieldOpaque<U>(i); }
};
template<> struct Yield<GFrame>
{
static inline cv::GFrame yield(cv::GCall &call, int i) { return call.yieldFrame(i); }
};
////////////////////////////////////////////////////////////////////////////
// Helper classes which brings outputMeta() marshalling to kernel
@ -95,11 +105,12 @@ namespace detail
template<typename T> struct MetaType;
template<> struct MetaType<cv::GMat> { using type = GMatDesc; };
template<> struct MetaType<cv::GMatP> { using type = GMatDesc; };
template<> struct MetaType<cv::GFrame> { using type = GMatDesc; };
template<> struct MetaType<cv::GFrame> { using type = GFrameDesc; };
template<> struct MetaType<cv::GScalar> { using type = GScalarDesc; };
template<typename U> struct MetaType<cv::GArray<U> > { using type = GArrayDesc; };
template<typename U> struct MetaType<cv::GOpaque<U> > { using type = GOpaqueDesc; };
template<typename T> struct MetaType { using type = T; }; // opaque args passed as-is
// FIXME: Move it to type traits?
// 2. Hacky test based on MetaType to check if we operate on G-* type or not
template<typename T> using is_nongapi_type = std::is_same<T, typename MetaType<T>::type>;
@ -214,7 +225,8 @@ public:
, K::tag()
, &K::getOutMeta
, {detail::GTypeTraits<R>::shape...}
, {detail::GTypeTraits<Args>::op_kind...}});
, {detail::GTypeTraits<Args>::op_kind...}
, {detail::GObtainCtor<R>::get()...}});
call.pass(args...); // TODO: std::forward() here?
return yield(call, typename detail::MkSeq<sizeof...(R)>::type());
}
@ -231,15 +243,14 @@ public:
using InArgs = std::tuple<Args...>;
using OutArgs = std::tuple<R>;
static_assert(!cv::detail::contains<GFrame, OutArgs>::value, "Values of GFrame type can't be used as operation outputs");
static R on(Args... args)
{
cv::GCall call(GKernel{ K::id()
, K::tag()
, &K::getOutMeta
, {detail::GTypeTraits<R>::shape}
, {detail::GTypeTraits<Args>::op_kind...}});
, {detail::GTypeTraits<Args>::op_kind...}
, {detail::GObtainCtor<R>::get()}});
call.pass(args...);
return detail::Yield<R>::yield(call, 0);
}
@ -458,11 +469,6 @@ namespace gapi {
std::vector<GTransform> m_transformations;
protected:
/// @private
// Check if package contains ANY implementation of a kernel API
// by API textual id.
bool includesAPI(const std::string &id) const;
/// @private
// Remove ALL implementations of the given API (identified by ID)
void removeAPI(const std::string &id);
@ -565,6 +571,9 @@ namespace gapi {
return includesAPI(KAPI::id());
}
/// @private
bool includesAPI(const std::string &id) const;
// FIXME: The below comment is wrong, and who needs this function?
/**
* @brief Find a kernel (by its API)

View File

@ -65,6 +65,8 @@ public:
using GMat::GMat;
};
class RMat;
/** @} */
/**
@ -113,6 +115,8 @@ struct GAPI_EXPORTS GMatDesc
// and as a 3-channel planar mat with height divided by 3)
bool canDescribe(const cv::Mat& mat) const;
bool canDescribe(const cv::RMat& mat) const;
// Meta combinator: return a new GMatDesc which differs in size by delta
// (all other fields are taken unchanged from this GMatDesc)
// FIXME: a better name?
@ -199,6 +203,27 @@ struct GAPI_EXPORTS GMatDesc
static inline GMatDesc empty_gmat_desc() { return GMatDesc{-1,-1,{-1,-1}}; }
namespace gapi { namespace detail {
/** Checks GMatDesc fields if the passed matrix is a set of n-dimentional points.
@param in GMatDesc to check.
@param n expected dimensionality.
@return the amount of points. In case input matrix can't be described as vector of points
of expected dimensionality, returns -1.
*/
int checkVector(const GMatDesc& in, const size_t n);
/** @overload
Checks GMatDesc fields if the passed matrix can be described as a set of points of any
dimensionality.
@return array of two elements in form of std::vector<int>: the amount of points
and their calculated dimensionality. In case input matrix can't be described as vector of points,
returns {-1, -1}.
*/
std::vector<int> checkVector(const GMatDesc& in);
}} // namespace gapi::detail
#if !defined(GAPI_STANDALONE)
GAPI_EXPORTS GMatDesc descr_of(const cv::UMat &mat);
#endif // !defined(GAPI_STANDALONE)
@ -209,6 +234,8 @@ namespace gapi { namespace own {
GAPI_EXPORTS GMatDesc descr_of(const Mat &mat);
}}//gapi::own
GAPI_EXPORTS GMatDesc descr_of(const RMat &mat);
#if !defined(GAPI_STANDALONE)
GAPI_EXPORTS GMatDesc descr_of(const cv::Mat &mat);
#else

View File

@ -18,6 +18,7 @@
#include <opencv2/gapi/gscalar.hpp>
#include <opencv2/gapi/garray.hpp>
#include <opencv2/gapi/gopaque.hpp>
#include <opencv2/gapi/gframe.hpp>
namespace cv
{
@ -38,6 +39,7 @@ using GMetaArg = util::variant
, GScalarDesc
, GArrayDesc
, GOpaqueDesc
, GFrameDesc
>;
GAPI_EXPORTS std::ostream& operator<<(std::ostream& os, const GMetaArg &);

View File

@ -15,6 +15,7 @@
#include <opencv2/gapi/own/exports.hpp>
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/util/variant.hpp>
#include <opencv2/gapi/util/throw.hpp>
#include <opencv2/gapi/util/type_traits.hpp>
@ -119,6 +120,7 @@ namespace detail
virtual void mov(BasicOpaqueRef &ref) = 0;
virtual const void* ptr() const = 0;
virtual void set(const cv::util::any &a) = 0;
};
template<typename T> class OpaqueRefT final: public BasicOpaqueRef
@ -212,6 +214,10 @@ namespace detail
}
virtual const void* ptr() const override { return &rref(); }
virtual void set(const cv::util::any &a) override {
wref() = util::any_cast<T>(a);
}
};
// This class strips type information from OpaqueRefT<> and makes it usable
@ -240,7 +246,7 @@ namespace detail
// FIXME: probably won't work with const object
explicit OpaqueRef(T&& obj) :
m_ref(new OpaqueRefT<util::decay_t<T>>(std::forward<T>(obj))),
m_kind(GOpaqueTraits<T>::kind) {}
m_kind(GOpaqueTraits<util::decay_t<T>>::kind) {}
cv::detail::OpaqueKind getKind() const
{
@ -285,6 +291,13 @@ namespace detail
// May be used to uniquely identify this object internally
const void *ptr() const { return m_ref->ptr(); }
// Introduced for in-graph meta handling
OpaqueRef& operator= (const cv::util::any &a)
{
m_ref->set(a);
return *this;
}
};
} // namespace detail
@ -295,25 +308,27 @@ namespace detail
template<typename T> class GOpaque
{
public:
GOpaque() { putDetails(); } // Empty constructor
explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor
: m_ref(ref) { putDetails(); } // (used by GCall, not for users)
detail::GOpaqueU strip() const { return m_ref; }
private:
// Host type (or Flat type) - the type this GOpaque is actually
// specified to.
using HT = typename detail::flatten_g<util::decay_t<T>>::type;
static void CTor(detail::OpaqueRef& ref) {
ref.reset<HT>();
ref.storeKind<HT>();
GOpaque() { putDetails(); } // Empty constructor
explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor
: m_ref(ref) { putDetails(); } // (used by GCall, not for users)
/// @private
detail::GOpaqueU strip() const {
return m_ref;
}
/// @private
static void Ctor(detail::OpaqueRef& ref) {
ref.reset<HT>();
}
private:
void putDetails() {
m_ref.setConstructFcn(&CTor);
m_ref.specifyType<HT>(); // FIXME: to unify those 2 to avoid excessive dynamic_cast
m_ref.storeKind<HT>(); //
m_ref.setConstructFcn(&Ctor);
m_ref.specifyType<HT>();
m_ref.storeKind<HT>();
}
detail::GOpaqueU m_ref;

View File

@ -135,7 +135,7 @@ GRunArg value_of(const GOrigin &origin);
// Transform run-time computation arguments into a collection of metadata
// extracted from that arguments
GMetaArg GAPI_EXPORTS descr_of(const GRunArg &arg );
GMetaArgs GAPI_EXPORTS descr_of(const GRunArgs &args);
GMetaArgs GAPI_EXPORTS_W descr_of(const GRunArgs &args);
// Transform run-time operation result argument into metadata extracted from that argument
// Used to compare the metadata, which generated at compile time with the metadata result operation in run time

View File

@ -8,15 +8,99 @@
#ifndef OPENCV_GAPI_GSTREAMING_COMPILED_HPP
#define OPENCV_GAPI_GSTREAMING_COMPILED_HPP
#include <memory>
#include <vector>
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/own/assert.hpp>
#include <opencv2/gapi/util/optional.hpp>
#include <opencv2/gapi/garg.hpp>
#include <opencv2/gapi/streaming/source.hpp>
namespace cv {
template<class T> using optional = cv::util::optional<T>;
namespace detail {
template<typename T> struct wref_spec {
using type = T;
};
template<typename T> struct wref_spec<std::vector<T> > {
using type = T;
};
template<typename RefHolder>
struct OptRef {
struct OptHolder {
virtual void mov(RefHolder &h) = 0;
virtual void reset() = 0;
virtual ~OptHolder() = default;
using Ptr = std::shared_ptr<OptHolder>;
};
template<class T> struct Holder final: OptHolder {
std::reference_wrapper<cv::optional<T> > m_opt_ref;
explicit Holder(cv::optional<T>& opt) : m_opt_ref(std::ref(opt)) {
}
virtual void mov(RefHolder &h) override {
using U = typename wref_spec<T>::type;
m_opt_ref.get() = cv::util::make_optional(std::move(h.template wref<U>()));
}
virtual void reset() override {
m_opt_ref.get().reset();
}
};
template<class T>
explicit OptRef(cv::optional<T>& t) : m_opt{new Holder<T>(t)} {}
void mov(RefHolder &h) { m_opt->mov(h); }
void reset() { m_opt->reset();}
private:
typename OptHolder::Ptr m_opt;
};
using OptionalVectorRef = OptRef<cv::detail::VectorRef>;
using OptionalOpaqueRef = OptRef<cv::detail::OpaqueRef>;
} // namespace detail
// TODO: Keep it in sync with GRunArgP (derive the type automatically?)
using GOptRunArgP = util::variant<
optional<cv::Mat>*,
optional<cv::RMat>*,
optional<cv::Scalar>*,
cv::detail::OptionalVectorRef,
cv::detail::OptionalOpaqueRef
>;
using GOptRunArgsP = std::vector<GOptRunArgP>;
namespace detail {
template<typename T> inline GOptRunArgP wrap_opt_arg(optional<T>& arg) {
// By default, T goes to an OpaqueRef. All other types are specialized
return GOptRunArgP{OptionalOpaqueRef(arg)};
}
template<typename T> inline GOptRunArgP wrap_opt_arg(optional<std::vector<T> >& arg) {
return GOptRunArgP{OptionalVectorRef(arg)};
}
template<> inline GOptRunArgP wrap_opt_arg(optional<cv::Mat> &m) {
return GOptRunArgP{&m};
}
template<> inline GOptRunArgP wrap_opt_arg(optional<cv::Scalar> &s) {
return GOptRunArgP{&s};
}
} // namespace detail
// Now cv::gout() may produce an empty vector (see "dynamic graphs"), so
// there may be a conflict between these two. State here that Opt version
// _must_ have at least one input for this overload
template<typename T, typename... Ts>
inline GOptRunArgsP gout(optional<T>&arg, optional<Ts>&... args)
{
return GOptRunArgsP{ detail::wrap_opt_arg(arg), detail::wrap_opt_arg(args)... };
}
/**
* \addtogroup gapi_main_classes
* @{
@ -49,11 +133,11 @@ namespace cv {
*
* @sa GCompiled
*/
class GAPI_EXPORTS GStreamingCompiled
class GAPI_EXPORTS_W_SIMPLE GStreamingCompiled
{
public:
class GAPI_EXPORTS Priv;
GStreamingCompiled();
GAPI_WRAP GStreamingCompiled();
// FIXME: More overloads?
/**
@ -96,7 +180,7 @@ public:
* @param ins vector of inputs to process.
* @sa gin
*/
void setSource(GRunArgs &&ins);
GAPI_WRAP void setSource(GRunArgs &&ins);
/**
* @brief Specify an input video stream for a single-input
@ -109,7 +193,23 @@ public:
* @param s a shared pointer to IStreamSource representing the
* input video stream.
*/
void setSource(const gapi::wip::IStreamSource::Ptr& s);
GAPI_WRAP void setSource(const gapi::wip::IStreamSource::Ptr& s);
/**
* @brief Constructs and specifies an input video stream for a
* single-input computation pipeline with the given parameters.
*
* Throws if pipeline is already running. Use stop() and then
* setSource() to run the graph on a new video stream.
*
* @overload
* @param args arguments used to contruct and initialize a stream
* source.
*/
template<typename T, typename... Args>
void setSource(Args&&... args) {
setSource(cv::gapi::wip::make_src<T>(std::forward<Args>(args)...));
}
/**
* @brief Start the pipeline execution.
@ -126,7 +226,7 @@ public:
* start()/stop()/setSource() may be called on the same object in
* multiple threads in your application.
*/
void start();
GAPI_WRAP void start();
/**
* @brief Get the next processed frame from the pipeline.
@ -150,6 +250,47 @@ public:
*/
bool pull(cv::GRunArgsP &&outs);
// NB: Used from python
GAPI_WRAP std::tuple<bool, cv::GRunArgs> pull();
/**
* @brief Get some next available data from the pipeline.
*
* This method takes a vector of cv::optional object. An object is
* assigned to some value if this value is available (ready) at
* the time of the call, and resets the object to empty() if it is
* not.
*
* This is a blocking method which guarantees that some data has
* been written to the output vector on return.
*
* Using this method only makes sense if the graph has
* desynchronized parts (see cv::gapi::desync). If there is no
* desynchronized parts in the graph, the behavior of this
* method is identical to the regular pull() (all data objects are
* produced synchronously in the output vector).
*
* Use gout() to create an output parameter vector.
*
* Output vectors must have the same number of elements as defined
* in the cv::GComputation protocol (at the moment of its
* construction). Shapes of elements also must conform to protocol
* (e.g. cv::optional<cv::Mat> needs to be passed where cv::GMat
* has been declared as output, and so on). Run-time exception is
* generated on type mismatch.
*
* This method writes new data into objects passed via output
* vector. If there is no data ready yet, this method blocks. Use
* try_pull() if you need a non-blocking version.
*
* @param outs vector of output parameters to obtain.
* @return true if next result has been obtained,
* false marks end of the stream.
*
* @sa cv::gapi::desync
*/
bool pull(cv::GOptRunArgsP &&outs);
/**
* @brief Try to get the next processed frame from the pipeline.
*
@ -172,7 +313,7 @@ public:
*
* Throws if the pipeline is not running.
*/
void stop();
GAPI_WRAP void stop();
/**
* @brief Test if the pipeline is running.
@ -184,7 +325,7 @@ public:
*
* @return true if the current stream is not over yet.
*/
bool running() const;
GAPI_WRAP bool running() const;
/// @private
Priv& priv();

View File

@ -17,6 +17,7 @@
#include <opencv2/gapi/gopaque.hpp>
#include <opencv2/gapi/gframe.hpp>
#include <opencv2/gapi/streaming/source.hpp>
#include <opencv2/gapi/media.hpp>
#include <opencv2/gapi/gcommon.hpp>
namespace cv
@ -67,7 +68,7 @@ namespace detail
template<> struct GTypeTraits<cv::GFrame>
{
static constexpr const ArgKind kind = ArgKind::GFRAME;
static constexpr const GShape shape = GShape::GMAT;
static constexpr const GShape shape = GShape::GFRAME;
static constexpr const OpaqueKind op_kind = OpaqueKind::CV_UNKNOWN;
};
template<> struct GTypeTraits<cv::GScalar>
@ -121,9 +122,11 @@ namespace detail
template<> struct GTypeOf<cv::UMat> { using type = cv::GMat; };
#endif // !defined(GAPI_STANDALONE)
template<> struct GTypeOf<cv::Mat> { using type = cv::GMat; };
template<> struct GTypeOf<cv::RMat> { using type = cv::GMat; };
template<> struct GTypeOf<cv::Scalar> { using type = cv::GScalar; };
template<typename U> struct GTypeOf<std::vector<U> > { using type = cv::GArray<U>; };
template<typename U> struct GTypeOf { using type = cv::GOpaque<U>;};
template<> struct GTypeOf<cv::MediaFrame> { using type = cv::GFrame; };
// FIXME: This is not quite correct since IStreamSource may produce not only Mat but also Scalar
// and vector data. TODO: Extend the type dispatching on these types too.
template<> struct GTypeOf<cv::gapi::wip::IStreamSource::Ptr> { using type = cv::GMat;};
@ -188,6 +191,29 @@ namespace detail
template<typename T> using wrap_gapi_helper = WrapValue<typename std::decay<T>::type>;
template<typename T> using wrap_host_helper = WrapValue<typename std::decay<g_type_of_t<T> >::type>;
// Union type for various user-defined type constructors (GArray<T>,
// GOpaque<T>, etc)
//
// TODO: Replace construct-only API with a more generic one (probably
// with bits of introspection)
//
// Not required for non-user-defined types (GMat, GScalar, etc)
using HostCtor = util::variant
< util::monostate
, detail::ConstructVec
, detail::ConstructOpaque
>;
template<typename T> struct GObtainCtor {
static HostCtor get() { return HostCtor{}; }
};
template<typename T> struct GObtainCtor<GArray<T> > {
static HostCtor get() { return HostCtor{ConstructVec{&GArray<T>::VCtor}}; };
};
template<typename T> struct GObtainCtor<GOpaque<T> > {
static HostCtor get() { return HostCtor{ConstructOpaque{&GOpaque<T>::Ctor}}; };
};
} // namespace detail
} // namespace cv

View File

@ -21,14 +21,36 @@
@{
@defgroup gapi_filters Graph API: Image filters
@defgroup gapi_colorconvert Graph API: Converting image from one color space to another
@defgroup gapi_feature Graph API: Image Feature Detection
@defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors
@}
*/
namespace {
void validateFindingContoursMeta(const int depth, const int chan, const int mode)
{
GAPI_Assert(chan == 1);
switch (mode)
{
case cv::RETR_CCOMP:
GAPI_Assert(depth == CV_8U || depth == CV_32S);
break;
case cv::RETR_FLOODFILL:
GAPI_Assert(depth == CV_32S);
break;
default:
GAPI_Assert(depth == CV_8U);
break;
}
}
} // anonymous namespace
namespace cv { namespace gapi {
namespace imgproc {
using GMat2 = std::tuple<GMat,GMat>;
using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
using GFindContoursOutput = std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>;
G_TYPED_KERNEL(GFilter2D, <GMat(GMat,int,Mat,Point,Scalar,int,Scalar)>,"org.opencv.imgproc.filters.filter2D") {
static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) {
@ -78,6 +100,14 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GMorphologyEx, <GMat(GMat,MorphTypes,Mat,Point,int,BorderTypes,Scalar)>,
"org.opencv.imgproc.filters.morphologyEx") {
static GMatDesc outMeta(const GMatDesc &in, MorphTypes, Mat, Point, int,
BorderTypes, Scalar) {
return in;
}
};
G_TYPED_KERNEL(GSobel, <GMat(GMat,int,int,int,int,double,double,int,Scalar)>, "org.opencv.imgproc.filters.sobel") {
static GMatDesc outMeta(GMatDesc in, int ddepth, int, int, int, double, double, int, Scalar) {
return in.withDepth(ddepth);
@ -110,7 +140,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.canny"){
G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.feature.canny"){
static GMatDesc outMeta(GMatDesc in, double, double, int, bool) {
return in.withType(CV_8U, 1);
}
@ -118,12 +148,164 @@ namespace imgproc {
G_TYPED_KERNEL(GGoodFeatures,
<cv::GArray<cv::Point2f>(GMat,int,double,double,Mat,int,bool,double)>,
"org.opencv.imgproc.goodFeaturesToTrack") {
"org.opencv.imgproc.feature.goodFeaturesToTrack") {
static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) {
return empty_array_desc();
}
};
using RetrMode = RetrievalModes;
using ContMethod = ContourApproximationModes;
G_TYPED_KERNEL(GFindContours, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
"org.opencv.imgproc.shape.findContours")
{
static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return empty_array_desc();
}
};
// FIXME oc: make default value offset = Point()
G_TYPED_KERNEL(GFindContoursNoOffset, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod)>,
"org.opencv.imgproc.shape.findContoursNoOffset")
{
static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return empty_array_desc();
}
};
G_TYPED_KERNEL(GFindContoursH,<GFindContoursOutput(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
"org.opencv.imgproc.shape.findContoursH")
{
static std::tuple<GArrayDesc,GArrayDesc>
outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return std::make_tuple(empty_array_desc(), empty_array_desc());
}
};
// FIXME oc: make default value offset = Point()
G_TYPED_KERNEL(GFindContoursHNoOffset,<GFindContoursOutput(GMat,RetrMode,ContMethod)>,
"org.opencv.imgproc.shape.findContoursHNoOffset")
{
static std::tuple<GArrayDesc,GArrayDesc>
outMeta(GMatDesc in, RetrMode mode, ContMethod)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return std::make_tuple(empty_array_desc(), empty_array_desc());
}
};
G_TYPED_KERNEL(GBoundingRectMat, <GOpaque<Rect>(GMat)>,
"org.opencv.imgproc.shape.boundingRectMat") {
static GOpaqueDesc outMeta(GMatDesc in) {
if (in.depth == CV_8U)
{
GAPI_Assert(in.chan == 1);
}
else
{
GAPI_Assert (in.depth == CV_32S || in.depth == CV_32F);
int amount = detail::checkVector(in, 2u);
GAPI_Assert(amount != -1 &&
"Input Mat can't be described as vector of 2-dimentional points");
}
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GBoundingRectVector32S, <GOpaque<Rect>(GArray<Point2i>)>,
"org.opencv.imgproc.shape.boundingRectVector32S") {
static GOpaqueDesc outMeta(GArrayDesc) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GBoundingRectVector32F, <GOpaque<Rect>(GArray<Point2f>)>,
"org.opencv.imgproc.shape.boundingRectVector32F") {
static GOpaqueDesc outMeta(GArrayDesc) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine2DMat, <GOpaque<Vec4f>(GMat,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine2DMat") {
static GOpaqueDesc outMeta(GMatDesc in,DistanceTypes,double,double,double) {
int amount = detail::checkVector(in, 2u);
GAPI_Assert(amount != -1 &&
"Input Mat can't be described as vector of 2-dimentional points");
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine2DVector32S,
<GOpaque<Vec4f>(GArray<Point2i>,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine2DVector32S") {
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine2DVector32F,
<GOpaque<Vec4f>(GArray<Point2f>,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine2DVector32F") {
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine2DVector64F,
<GOpaque<Vec4f>(GArray<Point2d>,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine2DVector64F") {
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine3DMat, <GOpaque<Vec6f>(GMat,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine3DMat") {
static GOpaqueDesc outMeta(GMatDesc in,int,double,double,double) {
int amount = detail::checkVector(in, 3u);
GAPI_Assert(amount != -1 &&
"Input Mat can't be described as vector of 3-dimentional points");
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine3DVector32S,
<GOpaque<Vec6f>(GArray<Point3i>,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine3DVector32S") {
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine3DVector32F,
<GOpaque<Vec6f>(GArray<Point3f>,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine3DVector32F") {
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GFitLine3DVector64F,
<GOpaque<Vec6f>(GArray<Point3d>,DistanceTypes,double,double,double)>,
"org.opencv.imgproc.shape.fitLine3DVector64F") {
static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GBGR2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2rgb") {
static GMatDesc outMeta(GMatDesc in) {
return in; // type still remains CV_8UC3;
}
};
G_TYPED_KERNEL(GRGB2YUV, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2yuv") {
static GMatDesc outMeta(GMatDesc in) {
return in; // type still remains CV_8UC3;
@ -136,6 +318,42 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GBGR2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2i420") {
static GMatDesc outMeta(GMatDesc in) {
GAPI_Assert(in.depth == CV_8U);
GAPI_Assert(in.chan == 3);
GAPI_Assert(in.size.height % 2 == 0);
return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
}
};
G_TYPED_KERNEL(GRGB2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2i420") {
static GMatDesc outMeta(GMatDesc in) {
GAPI_Assert(in.depth == CV_8U);
GAPI_Assert(in.chan == 3);
GAPI_Assert(in.size.height % 2 == 0);
return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
}
};
G_TYPED_KERNEL(GI4202BGR, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202bgr") {
static GMatDesc outMeta(GMatDesc in) {
GAPI_Assert(in.depth == CV_8U);
GAPI_Assert(in.chan == 1);
GAPI_Assert(in.size.height % 3 == 0);
return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
}
};
G_TYPED_KERNEL(GI4202RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202rgb") {
static GMatDesc outMeta(GMatDesc in) {
GAPI_Assert(in.depth == CV_8U);
GAPI_Assert(in.chan == 1);
GAPI_Assert(in.size.height % 3 == 0);
return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
}
};
G_TYPED_KERNEL(GNV12toRGB, <GMat(GMat, GMat)>, "org.opencv.imgproc.colorconvert.nv12torgb") {
static GMatDesc outMeta(GMatDesc in_y, GMatDesc in_uv) {
GAPI_Assert(in_y.chan == 1);
@ -230,7 +448,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12torgbp") {
G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12torgbp") {
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
GAPI_Assert(inY.depth == CV_8U);
GAPI_Assert(inUV.depth == CV_8U);
@ -244,7 +462,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12togray") {
G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12togray") {
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
GAPI_Assert(inY.depth == CV_8U);
GAPI_Assert(inUV.depth == CV_8U);
@ -259,7 +477,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12tobgrp") {
G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12tobgrp") {
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
GAPI_Assert(inY.depth == CV_8U);
GAPI_Assert(inUV.depth == CV_8U);
@ -285,10 +503,10 @@ kernel kernelY. The final result is returned.
Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note In case of floating-point computation, rounding to nearest even is procedeed
@note
- In case of floating-point computation, rounding to nearest even is procedeed
if hardware supports it (if not - to nearest value).
@note Function textual ID is "org.opencv.imgproc.filters.sepfilter"
- Function textual ID is "org.opencv.imgproc.filters.sepfilter"
@param src Source image.
@param ddepth desired depth of the destination image (the following combinations of src.depth() and ddepth are supported:
@ -327,9 +545,9 @@ anchor.y - 1)`.
Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
Output image must have the same size and number of channels an input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.filter2D"
@note
- Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.filter2D"
@param src input image.
@param ddepth desired depth of the destination image
@ -364,9 +582,9 @@ algorithms, and so on). If you need to compute pixel sums over variable-size win
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.boxfilter"
@note
- Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.boxfilter"
@param src Source image.
@param dtype the output image depth (-1 to set the input image data type).
@ -393,9 +611,9 @@ true, borderType)`.
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.blur"
@note
- Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.blur"
@param src Source image.
@param ksize blurring kernel size.
@ -421,9 +639,9 @@ Output image must have the same type and number of channels an input image.
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.gaussianBlur"
@note
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.gaussianBlur"
@param src input image;
@param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
@ -446,16 +664,16 @@ GAPI_EXPORTS GMat gaussianBlur(const GMat& src, const Size& ksize, double sigmaX
The function smoothes an image using the median filter with the \f$\texttt{ksize} \times
\texttt{ksize}\f$ aperture. Each channel of a multi-channel image is processed independently.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
The median filter uses cv::BORDER_REPLICATE internally to cope with border pixels, see cv::BorderTypes
@note Function textual ID is "org.opencv.imgproc.filters.medianBlur"
- Function textual ID is "org.opencv.imgproc.filters.medianBlur"
@param src input matrix (image)
@param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
@sa boxFilter, gaussianBlur
*/
GAPI_EXPORTS GMat medianBlur(const GMat& src, int ksize);
GAPI_EXPORTS_W GMat medianBlur(const GMat& src, int ksize);
/** @brief Erodes an image by using a specific structuring element.
@ -467,9 +685,9 @@ shape of a pixel neighborhood over which the minimum is taken:
Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.erode"
@note
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.erode"
@param src input image
@param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular
@ -479,7 +697,7 @@ anchor is at the element center.
@param iterations number of times erosion is applied.
@param borderType pixel extrapolation method, see cv::BorderTypes
@param borderValue border value in case of a constant border
@sa dilate
@sa dilate, morphologyEx
*/
GAPI_EXPORTS GMat erode(const GMat& src, const Mat& kernel, const Point& anchor = Point(-1,-1), int iterations = 1,
int borderType = BORDER_CONSTANT,
@ -491,7 +709,9 @@ The function erodes the source image using the rectangular structuring element w
Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.erode"
@param src input image
@param iterations number of times erosion is applied.
@ -512,9 +732,9 @@ shape of a pixel neighborhood over which the maximum is taken:
Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.dilate"
@note
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.dilate"
@param src input image.
@param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular
@ -539,9 +759,9 @@ shape of a pixel neighborhood over which the maximum is taken:
Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
Output image must have the same type, size, and number of channels as the input image.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.dilate"
@note
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.dilate"
@param src input image.
@param iterations number of times dilation is applied.
@ -554,6 +774,38 @@ GAPI_EXPORTS GMat dilate3x3(const GMat& src, int iterations = 1,
int borderType = BORDER_CONSTANT,
const Scalar& borderValue = morphologyDefaultBorderValue());
/** @brief Performs advanced morphological transformations.
The function can perform advanced morphological transformations using an erosion and dilation as
basic operations.
Any of the operations can be done in-place. In case of multi-channel images, each channel is
processed independently.
@note
- Function textual ID is "org.opencv.imgproc.filters.morphologyEx"
- The number of iterations is the number of times the erosion or dilation operation will be
applied. For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to
apply successively: erode -> erode -> dilate -> dilate
(and not erode -> dilate -> erode -> dilate).
@param src Input image.
@param op Type of a morphological operation, see #MorphTypes
@param kernel Structuring element. It can be created using #getStructuringElement.
@param anchor Anchor position within the element. Both negative values mean that the anchor is at
the kernel center.
@param iterations Number of times erosion and dilation are applied.
@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
@param borderValue Border value in case of a constant border. The default value has a special
meaning.
@sa dilate, erode, getStructuringElement
*/
GAPI_EXPORTS GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel,
const Point &anchor = Point(-1,-1),
const int iterations = 1,
const BorderTypes borderType = BORDER_CONSTANT,
const Scalar &borderValue = morphologyDefaultBorderValue());
/** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to
@ -583,9 +835,9 @@ The second case corresponds to a kernel of:
\f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f]
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.sobel"
@note
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.sobel"
@param src input image.
@param ddepth output image depth, see @ref filter_depths "combinations"; in the case of
@ -634,11 +886,10 @@ The second case corresponds to a kernel of:
\f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f]
@note First returned matrix correspons to dx derivative while the second one to dy.
@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest.
@note Function textual ID is "org.opencv.imgproc.filters.sobelxy"
@note
- First returned matrix corresponds to dx derivative while the second one to dy.
- Rounding to nearest even is performed if hardware supports it, if not - to nearest.
- Function textual ID is "org.opencv.imgproc.filters.sobelxy"
@param src input image.
@param ddepth output image depth, see @ref filter_depths "combinations"; in the case of
@ -719,6 +970,10 @@ proportional to sigmaSpace.
GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace,
int borderType = BORDER_DEFAULT);
//! @} gapi_filters
//! @addtogroup gapi_feature
//! @{
/** @brief Finds edges in an image using the Canny algorithm.
The function finds edges in the input image and marks them in the output map edges using the
@ -726,7 +981,7 @@ Canny algorithm. The smallest value between threshold1 and threshold2 is used fo
largest value is used to find initial segments of strong edges. See
<http://en.wikipedia.org/wiki/Canny_edge_detector>
@note Function textual ID is "org.opencv.imgproc.filters.canny"
@note Function textual ID is "org.opencv.imgproc.feature.canny"
@param image 8-bit input image.
@param threshold1 first threshold for the hysteresis procedure.
@ -757,11 +1012,11 @@ described in @cite Shi94
The function can be used to initialize a point-based tracker of an object.
@note If the function is called with different values A and B of the parameter qualityLevel , and
@note
- If the function is called with different values A and B of the parameter qualityLevel , and
A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
with qualityLevel=B .
@note Function textual ID is "org.opencv.imgproc.goodFeaturesToTrack"
- Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack"
@param image Input 8-bit or floating-point 32-bit, single-channel image.
@param maxCorners Maximum number of corners to return. If there are more corners than are found,
@ -784,7 +1039,7 @@ or #cornerMinEigenVal.
@return vector of detected corners.
*/
GAPI_EXPORTS GArray<Point2f> goodFeaturesToTrack(const GMat &image,
GAPI_EXPORTS_W GArray<Point2f> goodFeaturesToTrack(const GMat &image,
int maxCorners,
double qualityLevel,
double minDistance,
@ -795,6 +1050,8 @@ GAPI_EXPORTS GArray<Point2f> goodFeaturesToTrack(const GMat &image,
/** @brief Equalizes the histogram of a grayscale image.
//! @} gapi_feature
The function equalizes the histogram of the input image using the following algorithm:
- Calculate the histogram \f$H\f$ for src .
@ -804,19 +1061,290 @@ The function equalizes the histogram of the input image using the following algo
- Transform the image using \f$H'\f$ as a look-up table: \f$\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\f$
The algorithm normalizes the brightness and increases the contrast of the image.
@note The returned image is of the same size and type as input.
@note Function textual ID is "org.opencv.imgproc.equalizeHist"
@note
- The returned image is of the same size and type as input.
- Function textual ID is "org.opencv.imgproc.equalizeHist"
@param src Source 8-bit single channel image.
*/
GAPI_EXPORTS GMat equalizeHist(const GMat& src);
//! @} gapi_filters
//! @addtogroup gapi_shape
//! @{
/** @brief Finds contours in a binary image.
The function retrieves contours from the binary image using the algorithm @cite Suzuki85 .
The contours are a useful tool for shape analysis and object detection and recognition.
See squares.cpp in the OpenCV sample directory.
@note Function textual ID is "org.opencv.imgproc.shape.findContours"
@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL then @ref CV_32SC1 is supported only.
@param mode Contour retrieval mode, see #RetrievalModes
@param method Contour approximation method, see #ContourApproximationModes
@param offset Optional offset by which every contour point is shifted. This is useful if the
contours are extracted from the image ROI and then they should be analyzed in the whole image
context.
@return GArray of detected contours. Each contour is stored as a GArray of points.
*/
GAPI_EXPORTS GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset);
// FIXME oc: make default value offset = Point()
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset"
*/
GAPI_EXPORTS GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
/** @brief Finds contours and their hierarchy in a binary image.
The function retrieves contours from the binary image using the algorithm @cite Suzuki85
and calculates their hierarchy.
The contours are a useful tool for shape analysis and object detection and recognition.
See squares.cpp in the OpenCV sample directory.
@note Function textual ID is "org.opencv.imgproc.shape.findContoursH"
@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL, then only @ref CV_32SC1 is supported.
@param mode Contour retrieval mode, see #RetrievalModes
@param method Contour approximation method, see #ContourApproximationModes
@param offset Optional offset by which every contour point is shifted. This is useful if the
contours are extracted from the image ROI and then they should be analyzed in the whole image
context.
@return
- GArray of detected contours. Each contour is stored as a GArray of points.
- Optional output GArray of cv::Vec4i, containing information about the image topology.
It has as many elements as the number of contours. For each i-th contour contours[i], the elements
hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based
indices in contours of the next and previous contours at the same hierarchical level, the first
child contour and the parent contour, respectively. If for the contour i there are no next,
previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
*/
GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset);
// FIXME oc: make default value offset = Point()
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset"
*/
GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels
of gray-scale image.
The function calculates and returns the minimal up-right bounding rectangle for the specified
point set or non-zero pixels of gray-scale image.
@note
- Function textual ID is "org.opencv.imgproc.shape.boundingRectMat"
- In case of a 2D points' set given, Mat should be 2-dimensional, have a single row or column
if there are 2 channels, or have 2 columns if there is a single channel. Mat should have either
@ref CV_32S or @ref CV_32F depth
@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F
2D points stored in Mat.
*/
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GMat& src);
/** @overload
Calculates the up-right bounding rectangle of a point set.
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S"
@param src Input 2D point set, stored in std::vector<cv::Point2i>.
*/
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2i>& src);
/** @overload
Calculates the up-right bounding rectangle of a point set.
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F"
@param src Input 2D point set, stored in std::vector<cv::Point2f>.
*/
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2f>& src);
/** @brief Fits a line to a 2D point set.
The function fits a line to a 2D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance
function, one of the following:
- DIST_L2
\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f]
- DIST_L1
\f[\rho (r) = r\f]
- DIST_L12
\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
- DIST_FAIR
\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f]
- DIST_WELSCH
\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f]
- DIST_HUBER
\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
@note
- Function textual ID is "org.opencv.imgproc.shape.fitLine2DMat"
- In case of an N-dimensional points' set given, Mat should be 2-dimensional, have a single row
or column if there are N channels, or have N columns if there is a single channel.
@param src Input set of 2D points stored in one of possible containers: Mat,
std::vector<cv::Point2i>, std::vector<cv::Point2f>, std::vector<cv::Point2d>.
@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
and @ref DIST_C are not supported.
@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
is chosen.
@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
If it is 0, a default value is chosen.
@return Output line parameters: a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0),
where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line.
*/
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GMat& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32S"
*/
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2i>& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32F"
*/
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2f>& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector64F"
*/
GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2d>& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
/** @brief Fits a line to a 3D point set.
The function fits a line to a 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance
function, one of the following:
- DIST_L2
\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f]
- DIST_L1
\f[\rho (r) = r\f]
- DIST_L12
\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
- DIST_FAIR
\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f]
- DIST_WELSCH
\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f]
- DIST_HUBER
\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
@note
- Function textual ID is "org.opencv.imgproc.shape.fitLine3DMat"
- In case of an N-dimensional points' set given, Mat should be 2-dimensional, have a single row
or column if there are N channels, or have N columns if there is a single channel.
@param src Input set of 3D points stored in one of possible containers: Mat,
std::vector<cv::Point3i>, std::vector<cv::Point3f>, std::vector<cv::Point3d>.
@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
and @ref DIST_C are not supported.
@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
is chosen.
@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
If it is 0, a default value is chosen.
@return Output line parameters: a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0),
where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on
the line.
*/
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GMat& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32S"
*/
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3i>& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32F"
*/
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3f>& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector64F"
*/
GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3d>& src, const DistanceTypes distType,
const double param = 0., const double reps = 0.,
const double aeps = 0.);
//! @} gapi_shape
//! @addtogroup gapi_colorconvert
//! @{
/** @brief Converts an image from BGR color space to RGB color space.
The function converts an input image from BGR color space to RGB.
The conventional ranges for B, G, and R channel values are 0 to 255.
Output image is 8-bit unsigned 3-channel image @ref CV_8UC3.
@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2rgb"
@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
@sa RGB2BGR
*/
GAPI_EXPORTS GMat BGR2RGB(const GMat& src);
/** @brief Converts an image from RGB color space to gray-scaled.
The conventional ranges for R, G, and B channel values are 0 to 255.
Resulting gray color value computed as
\f[\texttt{dst} (I)= \texttt{0.299} * \texttt{src}(I).R + \texttt{0.587} * \texttt{src}(I).G + \texttt{0.114} * \texttt{src}(I).B \f]
@ -826,7 +1354,7 @@ Resulting gray color value computed as
@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC1.
@sa RGB2YUV
*/
GAPI_EXPORTS GMat RGB2Gray(const GMat& src);
GAPI_EXPORTS_W GMat RGB2Gray(const GMat& src);
/** @overload
Resulting gray color value computed as
@ -843,6 +1371,7 @@ Resulting gray color value computed as
GAPI_EXPORTS GMat RGB2Gray(const GMat& src, float rY, float gY, float bY);
/** @brief Converts an image from BGR color space to gray-scaled.
The conventional ranges for B, G, and R channel values are 0 to 255.
Resulting gray color value computed as
\f[\texttt{dst} (I)= \texttt{0.114} * \texttt{src}(I).B + \texttt{0.587} * \texttt{src}(I).G + \texttt{0.299} * \texttt{src}(I).R \f]
@ -871,6 +1400,70 @@ Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
*/
GAPI_EXPORTS GMat RGB2YUV(const GMat& src);
/** @brief Converts an image from BGR color space to I420 color space.
The function converts an input image from BGR color space to I420.
The conventional ranges for R, G, and B channel values are 0 to 255.
Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1.
Width of I420 output image must be the same as width of input image.
Height of I420 output image must be equal to 3/2 of the height of the input image.
@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2i420"
@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
@sa I4202BGR
*/
GAPI_EXPORTS GMat BGR2I420(const GMat& src);
/** @brief Converts an image from RGB color space to I420 color space.
The function converts an input image from RGB color space to I420.
The conventional ranges for R, G, and B channel values are 0 to 255.
Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1.
Width of I420 output image must be the same as width of input image.
Height of I420 output image must be equal to 3/2 of the height of the input image.
@note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2i420"
@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
@sa I4202RGB
*/
GAPI_EXPORTS GMat RGB2I420(const GMat& src);
/** @brief Converts an image from I420 color space to BGR color space.
The function converts an input image from I420 color space to BGR.
The conventional ranges for B, G, and R channel values are 0 to 255.
Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3.
Width of BGR output image must be the same as width of input image.
Height of BGR output image must be equal to 2/3 of the height of the input image.
@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202bgr"
@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
@sa BGR2I420
*/
GAPI_EXPORTS GMat I4202BGR(const GMat& src);
/** @brief Converts an image from I420 color space to RGB color space.
The function converts an input image from I420 color space to RGB.
The conventional ranges for B, G, and R channel values are 0 to 255.
Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3.
Width of RGB output image must be the same as width of input image.
Height of RGB output image must be equal to 2/3 of the height of the input image.
@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202rgb"
@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
@sa RGB2I420
*/
GAPI_EXPORTS GMat I4202RGB(const GMat& src);
/** @brief Converts an image from BGR color space to LUV color space.
The function converts an input image from BGR color space to LUV.

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation
// Copyright (C) 2019-2020 Intel Corporation
#ifndef OPENCV_GAPI_INFER_HPP
@ -16,6 +16,7 @@
#include <utility> // tuple
#include <type_traits> // is_same, false_type
#include <opencv2/gapi/util/util.hpp> // all_satisfy
#include <opencv2/gapi/util/any.hpp> // any<>
#include <opencv2/gapi/gkernel.hpp> // GKernelType[M], GBackend
#include <opencv2/gapi/garg.hpp> // GArg
@ -27,40 +28,54 @@ namespace cv {
template<typename, typename> class GNetworkType;
namespace detail {
template<typename, typename>
struct valid_infer2_types;
// Terminal case 1 (50/50 success)
template<typename T>
struct valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> > {
// By default, Nets are limited to GMat argument types only
// for infer2, every GMat argument may translate to either
// GArray<GMat> or GArray<Rect>. GArray<> part is stripped
// already at this point.
static constexpr const auto value =
std::is_same<typename std::decay<T>::type, cv::GMat>::value
|| std::is_same<typename std::decay<T>::type, cv::Rect>::value;
};
// Infer ///////////////////////////////////////////////////////////////////////
template<typename T>
struct accepted_infer_types {
static constexpr const auto value =
std::is_same<typename std::decay<T>::type, cv::GMat>::value
|| std::is_same<typename std::decay<T>::type, cv::GFrame>::value;
};
// Terminal case 2 (100% failure)
template<typename... Ts>
struct valid_infer2_types< std::tuple<>, std::tuple<Ts...> >
: public std::false_type {
};
template<typename... Ts>
using valid_infer_types = all_satisfy<accepted_infer_types, Ts...>;
// Terminal case 3 (100% failure)
template<typename... Ns>
struct valid_infer2_types< std::tuple<Ns...>, std::tuple<> >
: public std::false_type {
};
// Infer2 //////////////////////////////////////////////////////////////////////
// Recursion -- generic
template<typename... Ns, typename T, typename...Ts>
struct valid_infer2_types< std::tuple<cv::GMat,Ns...>, std::tuple<T,Ts...> > {
static constexpr const auto value =
valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> >::value
&& valid_infer2_types< std::tuple<Ns...>, std::tuple<Ts...> >::value;
};
template<typename, typename>
struct valid_infer2_types;
// Terminal case 1 (50/50 success)
template<typename T>
struct valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> > {
// By default, Nets are limited to GMat argument types only
// for infer2, every GMat argument may translate to either
// GArray<GMat> or GArray<Rect>. GArray<> part is stripped
// already at this point.
static constexpr const auto value =
std::is_same<typename std::decay<T>::type, cv::GMat>::value
|| std::is_same<typename std::decay<T>::type, cv::Rect>::value;
};
// Terminal case 2 (100% failure)
template<typename... Ts>
struct valid_infer2_types< std::tuple<>, std::tuple<Ts...> >
: public std::false_type {
};
// Terminal case 3 (100% failure)
template<typename... Ns>
struct valid_infer2_types< std::tuple<Ns...>, std::tuple<> >
: public std::false_type {
};
// Recursion -- generic
template<typename... Ns, typename T, typename...Ts>
struct valid_infer2_types< std::tuple<cv::GMat,Ns...>, std::tuple<T,Ts...> > {
static constexpr const auto value =
valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> >::value
&& valid_infer2_types< std::tuple<Ns...>, std::tuple<Ts...> >::value;
};
} // namespace detail
// TODO: maybe tuple_wrap_helper from util.hpp may help with this.
@ -76,7 +91,6 @@ public:
using API = std::function<Result(Args...)>;
using ResultL = std::tuple< cv::GArray<R>... >;
using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
};
// Single-return-value network definition (specialized base class)
@ -91,17 +105,48 @@ public:
using API = std::function<R(Args...)>;
using ResultL = cv::GArray<R>;
using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
};
// InferAPI: Accepts either GMat or GFrame for very individual network's input
template<class Net, class... Ts>
struct InferAPI {
using type = typename std::enable_if
< detail::valid_infer_types<Ts...>::value
&& std::tuple_size<typename Net::InArgs>::value == sizeof...(Ts)
, std::function<typename Net::Result(Ts...)>
>::type;
};
// InferAPIRoi: Accepts a rectangle and either GMat or GFrame
template<class Net, class T>
struct InferAPIRoi {
using type = typename std::enable_if
< detail::valid_infer_types<T>::value
&& std::tuple_size<typename Net::InArgs>::value == 1u
, std::function<typename Net::Result(cv::GOpaque<cv::Rect>, T)>
>::type;
};
// InferAPIList: Accepts a list of rectangles and list of GMat/GFrames;
// crops every input.
// SFINAE gate for the roi-list infer overload: the call takes a
// GArray<cv::Rect> first, followed by one valid input per declared InArg.
template<class Net, class... Ts>
struct InferAPIList {
    using type = typename std::enable_if
        < detail::valid_infer_types<Ts...>::value
       && std::tuple_size<typename Net::InArgs>::value == sizeof...(Ts)
        , std::function<typename Net::ResultL(cv::GArray<cv::Rect>, Ts...)>
        >::type;
};
// APIList2 is also template to allow different calling options
// (GArray<cv::Rect> vs GArray<cv::GMat> per input)
template<class Net, class... Ts>
template<class Net, typename T, class... Ts>
struct InferAPIList2 {
using type = typename std::enable_if
< cv::detail::valid_infer2_types< typename Net::InArgs
< detail::valid_infer_types<T>::value &&
cv::detail::valid_infer2_types< typename Net::InArgs
, std::tuple<Ts...> >::value,
std::function<typename Net::ResultL(cv::GMat, cv::GArray<Ts>...)>
std::function<typename Net::ResultL(T, cv::GArray<Ts>...)>
>::type;
};
@ -114,22 +159,75 @@ struct InferAPIList2 {
// a particular backend, not by a network itself.
struct GInferBase {
static constexpr const char * id() {
return "org.opencv.dnn.infer"; // Universal stub
return "org.opencv.dnn.infer"; // Universal stub
}
static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
return GMetaArgs{}; // One more universal stub
return GMetaArgs{}; // One more universal stub
}
};
// Struct stores network input/output names.
// Used by infer<Generic>: an instance is attached to the GCall via
// call->params() so later stages can read the layer names back.
struct InOutInfo
{
    std::vector<std::string> in_names;  // input layer names, in argument order
    std::vector<std::string> out_names; // output layer names; infer<Generic> constructs this empty
};
/**
 * @{
 * @brief G-API object used to collect network inputs
 */
class GAPI_EXPORTS_W_SIMPLE GInferInputs
{
    // Mapping of input layer name -> associated GMat.
    using Map = std::unordered_map<std::string, GMat>;
public:
    GAPI_WRAP GInferInputs();
    // Associate a GMat with the given input layer name (bindings-friendly
    // alternative to operator[]).
    GAPI_WRAP void setInput(const std::string& name, const cv::GMat& value);

    // Access (and create on first use — standard unordered_map semantics
    // presumed, implemented in the .cpp) the GMat slot for an input name.
    cv::GMat& operator[](const std::string& name);
    const Map& getBlobs() const;

private:
    // Shared so that copies of GInferInputs alias the same blob map.
    std::shared_ptr<Map> in_blobs;
};
/** @} */
/**
 * @{
 * @brief G-API object used to collect network outputs
 */
struct GAPI_EXPORTS_W_SIMPLE GInferOutputs
{
public:
    GAPI_WRAP GInferOutputs() = default;
    // Wraps the GCall created by infer<Generic> (see infer() below).
    GInferOutputs(std::shared_ptr<cv::GCall> call);
    // Fetch the output GMat associated with the given output layer name.
    GAPI_WRAP cv::GMat at(const std::string& name);
private:
    struct Priv;                   // pimpl: definition lives in the .cpp
    std::shared_ptr<Priv> m_priv;
};
/** @} */
// Base "InferROI" kernel.
// All notes from "Infer" kernel apply here as well.
// id()/getOutMeta() are placeholders: the real output metadata is
// provided by the inference backend, not by this stub.
struct GInferROIBase {
    static constexpr const char * id() {
        return "org.opencv.dnn.infer-roi"; // Universal stub
    }
    static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
        return GMetaArgs{}; // One more universal stub
    }
};
// Base "Infer list" kernel.
// All notes from "Infer" kernel apply here as well.
struct GInferListBase {
static constexpr const char * id() {
return "org.opencv.dnn.infer-roi"; // Universal stub
return "org.opencv.dnn.infer-roi-list-1"; // Universal stub
}
static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
return GMetaArgs{}; // One more universal stub
return GMetaArgs{}; // One more universal stub
}
};
@ -137,33 +235,46 @@ struct GInferListBase {
// All notes from "Infer" kernel apply here as well.
struct GInferList2Base {
static constexpr const char * id() {
return "org.opencv.dnn.infer-roi-list"; // Universal stub
return "org.opencv.dnn.infer-roi-list-2"; // Universal stub
}
static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
return GMetaArgs{}; // One more universal stub
return GMetaArgs{}; // One more universal stub
}
};
// A generic inference kernel. API (::on()) is fully defined by the Net
// template parameter.
// Acts as a regular kernel in graph (via KernelTypeMedium).
template<typename Net>
template<typename Net, typename... Args>
struct GInfer final
: public GInferBase
, public detail::KernelTypeMedium< GInfer<Net>
, typename Net::API > {
, public detail::KernelTypeMedium< GInfer<Net, Args...>
, typename InferAPI<Net, Args...>::type > {
using GInferBase::getOutMeta; // FIXME: name lookup conflict workaround?
static constexpr const char* tag() { return Net::tag(); }
};
// A specific roi-inference kernel. API (::on()) is fixed here and
// verified against Net.
// The call signature comes from InferAPIRoi<Net, T>, so only
// single-input networks with a valid input type instantiate.
template<typename Net, typename T>
struct GInferROI final
    : public GInferROIBase
    , public detail::KernelTypeMedium< GInferROI<Net, T>
                                     , typename InferAPIRoi<Net, T>::type > {
    using GInferROIBase::getOutMeta; // FIXME: name lookup conflict workaround?
    static constexpr const char* tag() { return Net::tag(); }
};
// A generic roi-list inference kernel. API (::on()) is derived from
// the Net template parameter (see more in infer<> overload).
template<typename Net>
template<typename Net, typename... Args>
struct GInferList final
: public GInferListBase
, public detail::KernelTypeMedium< GInferList<Net>
, typename Net::APIList > {
, public detail::KernelTypeMedium< GInferList<Net, Args...>
, typename InferAPIList<Net, Args...>::type > {
using GInferListBase::getOutMeta; // FIXME: name lookup conflict workaround?
static constexpr const char* tag() { return Net::tag(); }
@ -174,11 +285,11 @@ struct GInferList final
// overload).
// Takes an extra variadic template list to reflect how this network
// was called (with Rects or GMats as array parameters)
template<typename Net, typename... Args>
template<typename Net, typename T, typename... Args>
struct GInferList2 final
: public GInferList2Base
, public detail::KernelTypeMedium< GInferList2<Net, Args...>
, typename InferAPIList2<Net, Args...>::type > {
, public detail::KernelTypeMedium< GInferList2<Net, T, Args...>
, typename InferAPIList2<Net, T, Args...>::type > {
using GInferList2Base::getOutMeta; // FIXME: name lookup conflict workaround?
static constexpr const char* tag() { return Net::tag(); }
@ -195,6 +306,23 @@ struct GInferList2 final
namespace cv {
namespace gapi {
/** @brief Calculates response for the specified network (template
 * parameter) for the specified region in the source image.
 * Currently expects a single-input network only.
 *
 * @tparam Net a network type defined with G_API_NET() macro.
 * @param in input image where to take ROI from.
 * @param roi an object describing the region of interest
 *   in the source image. May be calculated in the same graph dynamically.
 * @return an object of return type as defined in G_API_NET().
 *   If a network has multiple return values (defined with a tuple), a tuple of
 *   objects of appropriate type is returned.
 * @sa G_API_NET()
 */
template<typename Net, typename T>
typename Net::Result infer(cv::GOpaque<cv::Rect> roi, T in) {
    return GInferROI<Net, T>::on(roi, in);
}
/** @brief Calculates responses for the specified network (template
* parameter) for every region in the source image.
@ -211,7 +339,7 @@ namespace gapi {
*/
template<typename Net, typename... Args>
typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
return GInferList<Net>::on(roi, std::forward<Args>(args)...);
return GInferList<Net, Args...>::on(roi, std::forward<Args>(args)...);
}
/** @brief Calculates responses for the specified network (template
@ -231,11 +359,12 @@ typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
* GArray<> objects is returned with the appropriate types inside.
* @sa G_API_NET()
*/
template<typename Net, typename... Args>
typename Net::ResultL infer2(cv::GMat image, cv::GArray<Args>... args) {
template<typename Net, typename T, typename... Args>
typename Net::ResultL infer2(T image, cv::GArray<Args>... args) {
// FIXME: Declared as "2" because in the current form it steals
// overloads from the regular infer
return GInferList2<Net, Args...>::on(image, args...);
return GInferList2<Net, T, Args...>::on(image, args...);
}
/**
@ -251,9 +380,54 @@ typename Net::ResultL infer2(cv::GMat image, cv::GArray<Args>... args) {
*/
template<typename Net, typename... Args>
typename Net::Result infer(Args&&... args) {
return GInfer<Net>::on(std::forward<Args>(args)...);
return GInfer<Net, Args...>::on(std::forward<Args>(args)...);
}
/**
 * @brief Special network type
 *
 * Acts as a tag to select the generic (string-tagged) infer() overloads
 * instead of a statically-defined G_API_NET() network.
 */
struct Generic { };
/**
* @brief Calculates response for generic network
*
* @param tag a network tag
 * @param inputs network's inputs
* @return a GInferOutputs
*/
// Generic (string-tagged) inference: builds an "infer" GCall node from
// dynamically collected inputs instead of a compile-time G_API_NET type.
// Layer names travel alongside the call via InOutInfo in call->params().
template<typename T = Generic> GInferOutputs
infer(const std::string& tag, const GInferInputs& inputs)
{
    const auto& blobs = inputs.getBlobs();

    std::vector<std::string> names;
    std::vector<GArg> args;
    for (const auto& blob : blobs)
    {
        names.push_back(blob.first);
        args.emplace_back(blob.second);
    }

    // Every generic input is a cv::GMat, hence one CV_MAT kind per blob.
    GKinds kinds(blobs.size(), cv::detail::OpaqueKind::CV_MAT);
    auto call = std::make_shared<cv::GCall>(GKernel{
        GInferBase::id(),
        tag,
        GInferBase::getOutMeta,
        {}, // outShape will be filled later
        std::move(kinds),
        {}, // outCtors will be filled later
    });

    call->setArgs(std::move(args));
    call->params() = InOutInfo{names, {}};

    return GInferOutputs{std::move(call)};
}
// Non-template convenience overload delegating to infer<Generic>.
// NOTE(review): GAPI_EXPORTS_W presumably exposes it to language bindings —
// confirm against the bindings generator.
GAPI_EXPORTS_W inline GInferOutputs infer(const String& name, const GInferInputs& inputs)
{
    return infer<Generic>(name, inputs);
}
} // namespace gapi
} // namespace cv
@ -283,9 +457,9 @@ struct GAPI_EXPORTS GNetParam {
*
* @sa cv::gapi::networks
*/
struct GAPI_EXPORTS GNetPackage {
GNetPackage() : GNetPackage({}) {}
explicit GNetPackage(std::initializer_list<GNetParam> &&ii);
struct GAPI_EXPORTS_W_SIMPLE GNetPackage {
GAPI_WRAP GNetPackage() = default;
explicit GNetPackage(std::initializer_list<GNetParam> ii);
std::vector<GBackend> backends() const;
std::vector<GNetParam> networks;
};

View File

@ -0,0 +1,56 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_INFER_BINDINGS_IE_HPP
#define OPENCV_GAPI_INFER_BINDINGS_IE_HPP
#include <opencv2/gapi/util/any.hpp>
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer/ie.hpp> // Params
#include <string>
namespace cv {
namespace gapi {
namespace ie {
// NB: Used by python wrapper
// This class can be marked as SIMPLE, because it's implemented as pimpl
class GAPI_EXPORTS_W_SIMPLE PyParams {
public:
    PyParams() = default;
    // Mirrors the Params<cv::gapi::Generic> constructors: model+weights
    // vs. model-only (presumably a pre-compiled blob — confirm with ie.hpp).
    PyParams(const std::string &tag,
             const std::string &model,
             const std::string &weights,
             const std::string &device);

    PyParams(const std::string &tag,
             const std::string &model,
             const std::string &device);
    // G-API network parametrization API, forwarded to the wrapped Params.
    GBackend backend() const;
    std::string tag() const;
    cv::util::any params() const;

private:
    std::shared_ptr<Params<cv::gapi::Generic>> m_priv; // pimpl
};
GAPI_EXPORTS_W PyParams params(const std::string &tag,
const std::string &model,
const std::string &weights,
const std::string &device);
GAPI_EXPORTS_W PyParams params(const std::string &tag,
const std::string &model,
const std::string &device);
} // namespace ie
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_INFER_BINDINGS_IE_HPP

View File

@ -11,12 +11,14 @@
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <map>
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/core/cvdef.h> // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp> // Generic
namespace cv {
namespace gapi {
@ -41,6 +43,8 @@ enum class TraitAs: int
IMAGE //!< G-API treats an associated cv::Mat as an image so creates an "image" blob (NCHW/NHWC, etc)
};
using IEConfig = std::map<std::string, std::string>;
namespace detail {
struct ParamDesc {
std::string model_path;
@ -58,6 +62,11 @@ namespace detail {
// (e.g. topology's partial execution)
std::size_t num_in; // How many inputs are defined in the operation
std::size_t num_out; // How many outputs are defined in the operation
enum class Kind { Load, Import };
Kind kind;
bool is_generic;
IEConfig config;
};
} // namespace detail
@ -80,7 +89,19 @@ public:
: desc{ model, weights, device, {}, {}, {}
, std::tuple_size<typename Net::InArgs>::value // num_in
, std::tuple_size<typename Net::OutArgs>::value // num_out
} {
, detail::ParamDesc::Kind::Load
, false
, {}} {
};
Params(const std::string &model,
const std::string &device)
: desc{ model, {}, device, {}, {}, {}
, std::tuple_size<typename Net::InArgs>::value // num_in
, std::tuple_size<typename Net::OutArgs>::value // num_out
, detail::ParamDesc::Kind::Import
, false
, {}} {
};
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
@ -106,18 +127,65 @@ public:
return *this;
}
Params& pluginConfig(IEConfig&& cfg) {
desc.config = std::move(cfg);
return *this;
}
Params& pluginConfig(const IEConfig& cfg) {
desc.config = cfg;
return *this;
}
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return Net::tag(); }
cv::util::any params() const { return { desc }; }
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return Net::tag(); }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc desc;
};
template<>
class Params<cv::gapi::Generic> {
public:
Params(const std::string &tag,
const std::string &model,
const std::string &weights,
const std::string &device)
: desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true, {}}, m_tag(tag) {
};
Params(const std::string &tag,
const std::string &model,
const std::string &device)
: desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true, {}}, m_tag(tag) {
};
Params& pluginConfig(IEConfig&& cfg) {
desc.config = std::move(cfg);
return *this;
}
Params& pluginConfig(const IEConfig& cfg) {
desc.config = cfg;
return *this;
}
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return m_tag; }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc desc;
std::string m_tag;
};
} // namespace ie
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_INFER_HPP
#endif // OPENCV_GAPI_INFER_IE_HPP

View File

@ -0,0 +1,138 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_INFER_ONNX_HPP
#define OPENCV_GAPI_INFER_ONNX_HPP
#include <unordered_map>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/core/cvdef.h> // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
namespace cv {
namespace gapi {
namespace onnx {
GAPI_EXPORTS cv::gapi::GBackend backend();
// How the ONNX backend should interpret an associated cv::Mat.
enum class TraitAs: int {
    TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor
            //   and passes dimensions as-is
    IMAGE //!< G-API treats an associated cv::Mat as an image so
          //   creates an "image" blob (NCHW/NHWC, etc)
};
using PostProc = std::function<void(const std::unordered_map<std::string, cv::Mat> &,
std::unordered_map<std::string, cv::Mat> &)>;
namespace detail {
// Aggregated ONNX network parameters, filled in by Params<Net> below and
// consumed by the ONNX backend via Params::params().
struct ParamDesc {
    std::string model_path; // path to the .onnx model file

    // NB: num_* may differ from topology's real input/output port numbers
    // (e.g. topology's partial execution)
    std::size_t num_in;  // How many inputs are defined in the operation
    std::size_t num_out; // How many outputs are defined in the operation

    // NB: Here order follows the `Net` API
    std::vector<std::string> input_names;
    std::vector<std::string> output_names;

    // Inputs fixed to constant data at graph construction time.
    using ConstInput = std::pair<cv::Mat, TraitAs>;
    std::unordered_map<std::string, ConstInput> const_inputs;

    std::vector<cv::Scalar> mean;  // per-input mean (set via cfgMeanStd)
    std::vector<cv::Scalar> stdev; // per-input std-dev (set via cfgMeanStd)

    std::vector<cv::GMatDesc> out_metas; // output metas declared with cfgPostProc
    PostProc custom_post_proc;           // user hook applied to raw outputs

    std::vector<bool> normalize;         // per-input flags (set via cfgNormalize)
};
} // namespace detail
// Per-network configuration arrays, statically sized by the number of
// inputs/outputs declared in the G_API_NET() definition of Net.
template<typename Net>
struct PortCfg {
    // One layer name per declared input.
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    // One layer name per declared output.
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
    // One normalization coefficient (mean/stdev) per input.
    using NormCoefs = std::array
        < cv::Scalar
        , std::tuple_size<typename Net::InArgs>::value >;
    // One on/off normalization flag per input.
    using Normalize = std::array
        < bool
        , std::tuple_size<typename Net::InArgs>::value >;
};
// Network parametrization for the ONNX backend. All setters accumulate
// state into detail::ParamDesc, handed over via params().
// NOTE(review): the single-argument constructor is not `explicit`, so a
// std::string implicitly converts to Params<Net> — confirm this is intended.
template<typename Net> class Params {
public:
    // Captures the model path and the input/output counts declared by Net.
    Params(const std::string &model) {
        desc.model_path = model;
        desc.num_in = std::tuple_size<typename Net::InArgs>::value;
        desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
    };

    // BEGIN(G-API's network parametrization API)
    GBackend       backend() const { return cv::gapi::onnx::backend(); }
    std::string    tag()     const { return Net::tag(); }
    cv::util::any  params()  const { return { desc }; }
    // END(G-API's network parametrization API)

    // Bind input layer names (order follows Net's InArgs).
    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
        desc.input_names.assign(ll.begin(), ll.end());
        return *this;
    }

    // Bind output layer names (order follows Net's OutArgs).
    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &ll) {
        desc.output_names.assign(ll.begin(), ll.end());
        return *this;
    }

    // Pin a network input to constant data, interpreted per `hint`.
    Params<Net>& constInput(const std::string &layer_name,
                            const cv::Mat &data,
                            TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }

    // Set per-input mean/std-dev normalization coefficients.
    Params<Net>& cfgMeanStd(const typename PortCfg<Net>::NormCoefs &m,
                            const typename PortCfg<Net>::NormCoefs &s) {
        desc.mean.assign(m.begin(), m.end());
        desc.stdev.assign(s.begin(), s.end());
        return *this;
    }

    // Declare expected output metas and a custom post-processing hook.
    Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &outs,
                             const PostProc &pp) {
        desc.out_metas        = outs;
        desc.custom_post_proc = pp;
        return *this;
    }

    // Toggle normalization per input.
    Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &n) {
        desc.normalize.assign(n.begin(), n.end());
        return *this;
    }

protected:
    detail::ParamDesc desc; // accumulated configuration
};
} // namespace onnx
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_INFER_HPP

View File

@ -108,8 +108,8 @@ detection is smaller than confidence threshold, detection is rejected.
relative box intersection area required for rejecting the box with a smaller confidence.
If 1.f, nms is not performed and no boxes are rejected.
@param anchors Anchors Yolo network was trained with.
@note The default anchor values are taken from openvinotoolkit docs:
https://docs.openvinotoolkit.org/latest/omz_models_intel_yolo_v2_tiny_vehicle_detection_0001_description_yolo_v2_tiny_vehicle_detection_0001.html#output.
@note The default anchor values are specified for YOLO v2 Tiny as described in Intel Open Model Zoo
<a href="https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/yolo-v2-tiny-tf/yolo-v2-tiny-tf.md">documentation</a>.
@return a tuple with a vector of detected boxes and a vector of appropriate labels.
*/
GAPI_EXPORTS std::tuple<GArray<Rect>, GArray<int>> parseYolo(const GMat& in,
@ -122,4 +122,16 @@ GAPI_EXPORTS std::tuple<GArray<Rect>, GArray<int>> parseYolo(const GMat& in,
} // namespace gapi
} // namespace cv
// Reimport parseSSD & parseYolo under their initial namespace
namespace cv {
namespace gapi {
namespace streaming {
using cv::gapi::parseSSD;
using cv::gapi::parseYolo;
} // namespace streaming
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_PARSERS_HPP

View File

@ -0,0 +1,73 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_MEDIA_HPP
#define OPENCV_GAPI_MEDIA_HPP
#include <memory> // unique_ptr<>, shared_ptr<>
#include <array> // array<>
#include <functional> // function<>
#include <utility> // forward<>()
#include <opencv2/gapi/gframe.hpp>
namespace cv {
// Owning wrapper over a storage/format-specific frame representation.
// The actual data is provided by a user-supplied IAdapter implementation;
// state is held in a shared Priv, so copies of MediaFrame are shallow.
class GAPI_EXPORTS MediaFrame {
public:
    enum class Access { R, W }; // access mode requested from access()
    class IAdapter;             // adapter interface (declared below)
    class View;                 // scoped view over the frame planes (declared below)
    using AdapterPtr = std::unique_ptr<IAdapter>;

    MediaFrame();                       // default state; defined in the .cpp
    explicit MediaFrame(AdapterPtr &&); // takes ownership of the adapter
    // In-place factory: constructs adapter T from args (see definition below).
    template<class T, class... Args> static cv::MediaFrame Create(Args&&...);

    View access(Access) const;   // obtain plane pointers/strides for R or W
    cv::GFrameDesc desc() const; // frame format/size description

private:
    struct Priv;
    std::shared_ptr<Priv> m; // shared state => copies alias the same frame
};
// Factory: constructs a concrete adapter T in place from the forwarded
// arguments and wraps it into a MediaFrame which owns the adapter.
template<class T, class... Args>
inline cv::MediaFrame cv::MediaFrame::Create(Args&&... args) {
    AdapterPtr adapter(new T(std::forward<Args>(args)...));
    return cv::MediaFrame(std::move(adapter));
}
// Non-owning, move-only view over the frame's plane data.
// The optional callback is run by the destructor (see m_cb/~View) —
// presumably to commit written data back; confirm against adapter impls.
class GAPI_EXPORTS MediaFrame::View final {
public:
    static constexpr const size_t MAX_PLANES = 4;
    using Ptrs = std::array<void*, MAX_PLANES>;
    using Strides = std::array<std::size_t, MAX_PLANES>; // in bytes
    using Callback = std::function<void()>;

    View(Ptrs&& ptrs, Strides&& strs, Callback &&cb = [](){});
    View(const View&) = delete;       // copying a view is forbidden
    View(View&&) = default;
    View& operator = (const View&) = delete;
    // NB: no move-assignment is declared, so views can only be
    // constructed/moved-from, never reassigned.
    ~View();

    Ptrs ptr;       // per-plane data pointers (unused planes are null)
    Strides stride; // per-plane strides, in bytes

private:
    Callback m_cb;  // invoked on destruction
};
// Interface to implement in order to plug custom frame storage
// into MediaFrame (see MediaFrame::Create above).
class GAPI_EXPORTS MediaFrame::IAdapter {
public:
    virtual ~IAdapter() = 0; // pure, but still requires an out-of-line definition
    virtual cv::GFrameDesc meta() const = 0;
    virtual MediaFrame::View access(MediaFrame::Access) = 0;
};
} //namespace cv
#endif // OPENCV_GAPI_MEDIA_HPP

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#ifndef OPENCV_GAPI_GOCLKERNEL_HPP
@ -75,7 +75,7 @@ public:
protected:
detail::VectorRef& outVecRef(int output);
detail::VectorRef& outOpaqueRef(int output);
detail::OpaqueRef& outOpaqueRef(int output);
std::vector<GArg> m_args;
std::unordered_map<std::size_t, GRunArgP> m_results;

View File

@ -21,11 +21,12 @@
# include <opencv2/gapi/own/mat.hpp>
// replacement of cv's structures:
namespace cv {
using Rect = gapi::own::Rect;
using Size = gapi::own::Size;
using Point = gapi::own::Point;
using Scalar = gapi::own::Scalar;
using Mat = gapi::own::Mat;
using Rect = gapi::own::Rect;
using Size = gapi::own::Size;
using Point = gapi::own::Point;
using Point2f = gapi::own::Point2f;
using Scalar = gapi::own::Scalar;
using Mat = gapi::own::Mat;
} // namespace cv
#endif // !defined(GAPI_STANDALONE)

View File

@ -2,16 +2,28 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#ifndef OPENCV_GAPI_OWN_ASSERT_HPP
#define OPENCV_GAPI_OWN_ASSERT_HPP
#include <opencv2/gapi/util/compiler_hints.hpp>
#define GAPI_DbgAssertNoOp(expr) { \
constexpr bool _assert_tmp = false && (expr); \
cv::util::suppress_unused_warning(_assert_tmp); \
}
#if !defined(GAPI_STANDALONE)
#include <opencv2/core/base.hpp>
#define GAPI_Assert CV_Assert
#define GAPI_DbgAssert CV_DbgAssert
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
# define GAPI_DbgAssert CV_DbgAssert
#else
# define GAPI_DbgAssert(expr) GAPI_DbgAssertNoOp(expr)
#endif
#else
#include <stdexcept>
@ -33,7 +45,7 @@ namespace detail
#ifdef NDEBUG
# define GAPI_DbgAssert(expr)
# define GAPI_DbgAssert(expr) GAPI_DbgAssertNoOp(expr)
#else
# define GAPI_DbgAssert(expr) GAPI_Assert(expr)
#endif

View File

@ -254,6 +254,18 @@ namespace cv { namespace gapi { namespace own {
*this = std::move(tmp);
}
/** @brief Creates a full copy of the matrix and the underlying data.
The method creates a full copy of the matrix. The original step[] is not taken into account.
So, the copy has a continuous buffer occupying total() * elemSize() bytes.
*/
Mat clone() const
{
    Mat m;
    copyTo(m); // copyTo() allocates m's buffer, producing the continuous copy
    return m;
}
/** @brief Copies the matrix to another one.
The method copies the matrix data to another matrix. Before copying the data, the method invokes :

View File

@ -28,6 +28,16 @@ public:
int y = 0;
};
// Minimal 2D point with float coordinates; both coordinates
// default to zero when default-constructed.
class Point2f
{
public:
    Point2f() = default;
    Point2f(float _x, float _y)
    {
        x = _x;
        y = _y;
    }

    float x = 0.f; // horizontal coordinate
    float y = 0.f; // vertical coordinate
};
class Rect
{
public:

View File

@ -252,7 +252,7 @@ struct Mosaic
{
}
Mosaic() = default;
Mosaic() : cellSz(0), decim(0) {}
/*@{*/
cv::Rect mos; //!< Coordinates of the mosaic

View File

@ -8,6 +8,17 @@
#define OPENCV_GAPI_RMAT_HPP
#include <opencv2/gapi/gmat.hpp>
#include <opencv2/gapi/own/exports.hpp>
// Forward declaration
namespace cv {
namespace gapi {
namespace s11n {
struct IOStream;
struct IIStream;
} // namespace s11n
} // namespace gapi
} // namespace cv
namespace cv {
@ -31,7 +42,7 @@ namespace cv {
// performCalculations(in_view, out_view);
// // data from out_view is transferred to the device when out_view is destroyed
// }
class RMat
class GAPI_EXPORTS RMat
{
public:
// A lightweight wrapper on image data:
@ -39,43 +50,50 @@ public:
// - Doesn't implement copy semantics (it's assumed that a view is created each time
// wrapped data is being accessed);
// - Has an optional callback which is called when the view is destroyed.
class View
class GAPI_EXPORTS View
{
public:
using DestroyCallback = std::function<void()>;
using stepsT = std::vector<size_t>;
View() = default;
View(const GMatDesc& desc, uchar* data, size_t step = 0u, DestroyCallback&& cb = nullptr)
: m_desc(desc), m_data(data), m_step(step == 0u ? elemSize()*cols() : step), m_cb(cb)
{}
View(const GMatDesc& desc, uchar* data, const stepsT& steps = {}, DestroyCallback&& cb = nullptr);
View(const GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb = nullptr);
View(const View&) = delete;
View(View&&) = default;
View& operator=(const View&) = delete;
View& operator=(View&&) = default;
View(View&&) = default;
View& operator=(View&& v);
~View() { if (m_cb) m_cb(); }
cv::Size size() const { return m_desc.size; }
const std::vector<int>& dims() const { return m_desc.dims; }
int cols() const { return m_desc.size.width; }
int rows() const { return m_desc.size.height; }
int type() const { return CV_MAKE_TYPE(depth(), chan()); }
int type() const;
int depth() const { return m_desc.depth; }
int chan() const { return m_desc.chan; }
size_t elemSize() const { return CV_ELEM_SIZE(type()); }
template<typename T = uchar> T* ptr(int y = 0, int x = 0) {
return reinterpret_cast<T*>(m_data + m_step*y + x*CV_ELEM_SIZE(type()));
template<typename T = uchar> T* ptr(int y = 0) {
return reinterpret_cast<T*>(m_data + step()*y);
}
template<typename T = uchar> const T* ptr(int y = 0, int x = 0) const {
return reinterpret_cast<const T*>(m_data + m_step*y + x*CV_ELEM_SIZE(type()));
template<typename T = uchar> const T* ptr(int y = 0) const {
return reinterpret_cast<T*>(m_data + step()*y);
}
size_t step() const { return m_step; }
template<typename T = uchar> T* ptr(int y, int x) {
return reinterpret_cast<T*>(m_data + step()*y + step(1)*x);
}
template<typename T = uchar> const T* ptr(int y, int x) const {
return reinterpret_cast<const T*>(m_data + step()*y + step(1)*x);
}
size_t step(size_t i = 0) const { GAPI_DbgAssert(i<m_steps.size()); return m_steps[i]; }
const stepsT& steps() const { return m_steps; }
private:
GMatDesc m_desc;
uchar* m_data = nullptr;
size_t m_step = 0u;
stepsT m_steps = {0u};
DestroyCallback m_cb = nullptr;
};
@ -88,7 +106,13 @@ public:
// Implementation is responsible for setting the appropriate callback to
// the view when accessed for writing, to ensure that the data from the view
// is transferred to the device when the view is destroyed
virtual View access(Access) const = 0;
virtual View access(Access) = 0;
virtual void serialize(cv::gapi::s11n::IOStream&) {
GAPI_Assert(false && "Generic serialize method should never be called for RMat adapter");
}
virtual void deserialize(cv::gapi::s11n::IIStream&) {
GAPI_Assert(false && "Generic deserialize method should never be called for RMat adapter");
}
};
using AdapterP = std::shared_ptr<Adapter>;
@ -112,6 +136,10 @@ public:
return dynamic_cast<T*>(m_adapter.get());
}
void serialize(cv::gapi::s11n::IOStream& os) const {
m_adapter->serialize(os);
}
private:
AdapterP m_adapter = nullptr;
};

View File

@ -8,21 +8,27 @@
#define OPENCV_GAPI_S11N_HPP
#include <vector>
#include <map>
#include <unordered_map>
#include <opencv2/gapi/s11n/base.hpp>
#include <opencv2/gapi/gcomputation.hpp>
#include <opencv2/gapi/rmat.hpp>
namespace cv {
namespace gapi {
namespace detail {
GAPI_EXPORTS cv::GComputation getGraph(const std::vector<char> &p);
} // namespace detail
namespace detail {
GAPI_EXPORTS cv::GMetaArgs getMetaArgs(const std::vector<char> &p);
} // namespace detail
namespace detail {
GAPI_EXPORTS cv::GRunArgs getRunArgs(const std::vector<char> &p);
template<typename... Types>
cv::GCompileArgs getCompileArgs(const std::vector<char> &p);
template<typename RMatAdapterType>
cv::GRunArgs getRunArgsWithRMats(const std::vector<char> &p);
} // namespace detail
GAPI_EXPORTS std::vector<char> serialize(const cv::GComputation &c);
@ -33,6 +39,7 @@ T deserialize(const std::vector<char> &p);
//} // anonymous namespace
GAPI_EXPORTS std::vector<char> serialize(const cv::GCompileArgs&);
GAPI_EXPORTS std::vector<char> serialize(const cv::GMetaArgs&);
GAPI_EXPORTS std::vector<char> serialize(const cv::GRunArgs&);
@ -51,7 +58,307 @@ cv::GRunArgs deserialize(const std::vector<char> &p) {
return detail::getRunArgs(p);
}
template<typename T, typename... Types> inline
typename std::enable_if<std::is_same<T, GCompileArgs>::value, GCompileArgs>::
type deserialize(const std::vector<char> &p) {
return detail::getCompileArgs<Types...>(p);
}
template<typename T, typename RMatAdapterType> inline
typename std::enable_if<std::is_same<T, GRunArgs>::value, GRunArgs>::
type deserialize(const std::vector<char> &p) {
return detail::getRunArgsWithRMats<RMatAdapterType>(p);
}
} // namespace gapi
} // namespace cv
namespace cv {
namespace gapi {
namespace s11n {
// Abstract output (serialization) stream used by G-API's s11n machinery.
// Composite types (containers, cv types) are serialized via the free
// operator<< overloads below, all built on these primitives.
struct GAPI_EXPORTS IOStream {
    virtual ~IOStream() = default;
    // Define the native support for basic C++ types at the API level:
    virtual IOStream& operator<< (bool) = 0;
    virtual IOStream& operator<< (char) = 0;
    virtual IOStream& operator<< (unsigned char) = 0;
    virtual IOStream& operator<< (short) = 0;
    virtual IOStream& operator<< (unsigned short) = 0;
    virtual IOStream& operator<< (int) = 0;
    virtual IOStream& operator<< (uint32_t) = 0;
    virtual IOStream& operator<< (uint64_t) = 0;
    virtual IOStream& operator<< (float) = 0;
    virtual IOStream& operator<< (double) = 0;
    virtual IOStream& operator<< (const std::string&) = 0;
};
// Abstract input (deserialization) stream — the mirror of IOStream:
// extraction operators for the same set of natively supported types.
struct GAPI_EXPORTS IIStream {
    virtual ~IIStream() = default;
    virtual IIStream& operator>> (bool &) = 0;
    // std::vector<bool> stores packed bits and hands out proxy references,
    // so a dedicated overload is needed to read into its elements.
    virtual IIStream& operator>> (std::vector<bool>::reference) = 0;
    virtual IIStream& operator>> (char &) = 0;
    virtual IIStream& operator>> (unsigned char &) = 0;
    virtual IIStream& operator>> (short &) = 0;
    virtual IIStream& operator>> (unsigned short &) = 0;
    virtual IIStream& operator>> (int &) = 0;
    virtual IIStream& operator>> (float &) = 0;
    virtual IIStream& operator>> (double &) = 0;
    virtual IIStream& operator >> (uint32_t &) = 0;
    virtual IIStream& operator >> (uint64_t &) = 0;
    virtual IIStream& operator>> (std::string &) = 0;
};
namespace detail {
GAPI_EXPORTS std::unique_ptr<IIStream> getInStream(const std::vector<char> &p);
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// S11N operators
// Note: operators for basic types are defined in IIStream/IOStream
// OpenCV types ////////////////////////////////////////////////////////////////
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Point &pt);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Point &pt);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Point2f &pt);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Point2f &pt);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Size &sz);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Size &sz);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Rect &rc);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Rect &rc);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Scalar &s);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Scalar &s);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Mat &m);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Mat &m);
// FIXME: for GRunArgs serialization
#if !defined(GAPI_STANDALONE)
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::UMat &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::UMat &);
#endif // !defined(GAPI_STANDALONE)
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::RMat &r);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::RMat &r);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::IStreamSource::Ptr &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::VectorRef &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::VectorRef &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::OpaqueRef &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::MediaFrame &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::MediaFrame &);
// Generic STL types ////////////////////////////////////////////////////////////////
// Serializes a std::map as a 32-bit element count followed by the
// key/value pairs in the map's (sorted) iteration order.
template<typename K, typename V>
IOStream& operator<< (IOStream& os, const std::map<K, V> &m) {
    os << static_cast<uint32_t>(m.size());
    for (const auto &kv : m) {
        os << kv.first;
        os << kv.second;
    }
    return os;
}
// Deserializes a std::map written by the matching operator<<:
// a 32-bit element count, then that many key/value pairs.
template<typename K, typename V>
IIStream& operator>> (IIStream& is, std::map<K, V> &m) {
    m.clear();
    uint32_t count = 0u;
    is >> count;
    for (uint32_t i = 0u; i < count; ++i) {
        K key{};
        V val{};
        is >> key >> val;
        m[key] = val;
    }
    return is;
}
// Serializes a std::unordered_map as a 32-bit element count followed by
// the key/value pairs. NOTE: iteration order is unspecified, so the byte
// stream is not canonical across runs/implementations (same as before).
template<typename K, typename V>
IOStream& operator<< (IOStream& os, const std::unordered_map<K, V> &m) {
    os << static_cast<uint32_t>(m.size());
    for (const auto &kv : m) {
        os << kv.first;
        os << kv.second;
    }
    return os;
}
// Deserializes a std::unordered_map written by the matching operator<<:
// a 32-bit element count, then that many key/value pairs.
template<typename K, typename V>
IIStream& operator>> (IIStream& is, std::unordered_map<K, V> &m) {
    m.clear();
    uint32_t count = 0u;
    is >> count;
    for (uint32_t i = 0u; i < count; ++i) {
        K key{};
        V val{};
        is >> key >> val;
        m[key] = val;
    }
    return is;
}
// Serializes a std::vector as a 32-bit element count followed by the
// elements in order.
template<typename T>
IOStream& operator<< (IOStream& os, const std::vector<T> &ts) {
    os << static_cast<uint32_t>(ts.size());
    for (const auto &elem : ts) {
        os << elem;
    }
    return os;
}
// Deserializes a std::vector written by the matching operator<<:
// a 32-bit element count, then that many elements. T must be
// default-constructible (resize() requires it).
template<typename T>
IIStream& operator>> (IIStream& is, std::vector<T> &ts) {
    uint32_t count = 0u;
    is >> count;
    ts.resize(count); // resize(0) empties the vector, like clear() did
    for (auto &elem : ts) {
        is >> elem;
    }
    return is;
}
// Generic: variant serialization
namespace detail {
// Base case: the recursion over the variant's alternatives ran past the
// end, i.e. the requested index does not name any alternative.
// (Fix: the assert message previously said "variant>>" although put_v
// serves serialization, i.e. operator<<; also removed a stray ';' after
// the function body and added an unreachable return to silence
// missing-return warnings -- GAPI_Assert(false) never returns.)
template<typename V>
IOStream& put_v(IOStream& os, const V&, std::size_t) {
    GAPI_Assert(false && "variant<<: requested index is invalid");
    return os; // unreachable
}
// Writes the active alternative of variant `v`: peels one type X off the
// list per step until the requested index `x` reaches zero, then streams
// that alternative.
template<typename V, typename X, typename... Xs>
IOStream& put_v(IOStream& os, const V& v, std::size_t x) {
    return (x == 0u)
        ? os << cv::util::get<X>(v)
        : put_v<V, Xs...>(os, v, x-1);
}
// Base case: no alternative matched the requested index on reading.
template<typename V>
IIStream& get_v(IIStream& is, V&, std::size_t, std::size_t) {
    GAPI_Assert(false && "variant>>: requested index is invalid");
    return is; // unreachable
}
// Reads the alternative at index `gi` into variant `v`; `i` is the
// current position in the recursion over the variant's type list.
template<typename V, typename X, typename... Xs>
IIStream& get_v(IIStream& is, V& v, std::size_t i, std::size_t gi) {
    if (i == gi) {
        X x{};
        is >> x;
        v = V{std::move(x)};
        return is;
    } else return get_v<V, Xs...>(is, v, i+1, gi);
}
} // namespace detail
// Serializes a variant as its active alternative's index (as uint32_t)
// followed by the payload of that alternative.
template<typename... Ts>
IOStream& operator<< (IOStream& os, const cv::util::variant<Ts...> &v) {
    const auto tag = static_cast<uint32_t>(v.index());
    os << tag;
    return detail::put_v<cv::util::variant<Ts...>, Ts...>(os, v, v.index());
}
// Deserializes a variant: reads the alternative's index first, then the
// payload of that alternative.
// NOTE(review): the index is written as uint32_t by operator<< but read
// back via the int overload here -- presumably both are encoded as the
// same 4 bytes by the stream implementations; confirm before changing.
template<typename... Ts>
IIStream& operator>> (IIStream& is, cv::util::variant<Ts...> &v) {
    int idx = -1;
    is >> idx;
    // Reject out-of-range indices (corrupt or incompatible stream)
    GAPI_Assert(idx >= 0 && idx < static_cast<int>(sizeof...(Ts)));
    return detail::get_v<cv::util::variant<Ts...>, Ts...>(is, v, 0u, idx);
}
// FIXME: consider a better solution
// Reads the variant alternative selected by `idx` from the stream.
// Unlike operator>>, the index is supplied by the caller instead of
// being read from the stream itself.
// (Fix: dropped the former `is = detail::get_v(...)` -- get_v returns
// `is` itself, so the assignment was a no-op self-assignment through the
// abstract base's implicit operator=, i.e. a slicing assignment.)
template<typename... Ts>
void getRunArgByIdx (IIStream& is, cv::util::variant<Ts...> &v, uint32_t idx) {
    detail::get_v<cv::util::variant<Ts...>, Ts...>(is, v, 0u, idx);
}
} // namespace s11n
namespace detail
{
// Compile-time dispatcher which tries to deserialize a GCompileArg whose
// tag matches one of the user-supplied Types... Each recursion step peels
// one type off the tuple; the empty-tuple specialization terminates it.
template<typename T> struct try_deserialize_comparg;
// Terminal case: no type matched the tag -- return an empty optional
// (the argument is skipped by the caller).
template<> struct try_deserialize_comparg<std::tuple<>> {
static cv::util::optional<GCompileArg> exec(const std::string&, cv::gapi::s11n::IIStream&) {
        return { };
    }
};
// Recursive case: if `tag` matches T's CompileArgTag, deserialize a T via
// its S11N specialization and wrap it; otherwise recurse into Types...
template<typename T, typename... Types>
struct try_deserialize_comparg<std::tuple<T, Types...>> {
static cv::util::optional<GCompileArg> exec(const std::string& tag, cv::gapi::s11n::IIStream& is) {
    if (tag == cv::detail::CompileArgTag<T>::tag()) {
        // has_S11N_spec is false for types still using the default
        // (NotImplemented-derived) S11N -- catch that at compile time.
        static_assert(cv::gapi::s11n::detail::has_S11N_spec<T>::value,
            "cv::gapi::deserialize<GCompileArgs, Types...> expects Types to have S11N "
            "specializations with deserialization callbacks!");
        return cv::util::optional<GCompileArg>(
            GCompileArg { cv::gapi::s11n::detail::S11N<T>::deserialize(is) });
    }
    return try_deserialize_comparg<std::tuple<Types...>>::exec(tag, is);
}
};
// Deserializes a single GRunArg. RMat needs special treatment because its
// data lives behind a user-provided adapter type which the generic variant
// machinery cannot reconstruct on its own.
template<typename T> struct deserialize_runarg;
template<typename RMatAdapterType>
struct deserialize_runarg {
static GRunArg exec(cv::gapi::s11n::IIStream& is, uint32_t idx) {
    if (idx == GRunArg::index_of<RMat>()) {
        // RMat case: rebuild the data through the user-supplied adapter
        auto ptr = std::make_shared<RMatAdapterType>();
        ptr->deserialize(is);
        return GRunArg { RMat(std::move(ptr)) };
    } else { // non-RMat arg - use default deserialization
        GRunArg arg;
        getRunArgByIdx(is, arg, idx);
        return arg;
    }
}
};
// Attempts to deserialize one compile argument from its serialized byte
// blob, matching `tag` against the user-provided Types... Returns an
// empty optional when no type matches.
template<typename... Types>
inline cv::util::optional<GCompileArg> tryDeserializeCompArg(const std::string& tag,
                                                             const std::vector<char>& sArg) {
    auto pArgIs = cv::gapi::s11n::detail::getInStream(sArg);
    return try_deserialize_comparg<std::tuple<Types...>>::exec(tag, *pArgIs);
}
// Restores a GCompileArgs vector from its serialized representation.
// Each entry is stored as a tag string plus a nested byte blob; entries
// whose tag is not covered by Types... are silently skipped.
template<typename... Types>
cv::GCompileArgs getCompileArgs(const std::vector<char> &sArgs) {
    cv::GCompileArgs args;
    auto pIs = cv::gapi::s11n::detail::getInStream(sArgs);
    cv::gapi::s11n::IIStream& is = *pIs;

    uint32_t nArgs = 0;
    is >> nArgs;
    for (uint32_t i = 0; i < nArgs; ++i) {
        std::string tag;
        is >> tag;
        std::vector<char> blob;
        is >> blob;
        auto maybeArg = cv::gapi::detail::tryDeserializeCompArg<Types...>(tag, blob);
        if (maybeArg.has_value()) {
            args.push_back(maybeArg.value());
        }
    }
    return args;
}
// Restores a GRunArgs vector from serialized bytes. Each entry is stored
// as a variant index followed by its payload; RMat entries are rebuilt
// through the user-provided RMatAdapterType.
template<typename RMatAdapterType>
cv::GRunArgs getRunArgsWithRMats(const std::vector<char> &p) {
    auto pIs = cv::gapi::s11n::detail::getInStream(p);
    cv::gapi::s11n::IIStream& is = *pIs;
    cv::GRunArgs args;

    uint32_t nArgs = 0;
    is >> nArgs;
    for (uint32_t i = 0; i < nArgs; ++i) {
        uint32_t argIdx = 0;
        is >> argIdx;
        args.push_back(cv::gapi::detail::deserialize_runarg<RMatAdapterType>::exec(is, argIdx));
    }
    return args;
}
} // namespace detail
} // namespace gapi
} // namespace cv

View File

@ -0,0 +1,46 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_S11N_BASE_HPP
#define OPENCV_GAPI_S11N_BASE_HPP
#include <opencv2/gapi/own/assert.hpp>
#include <opencv2/gapi/own/exports.hpp>
namespace cv {
namespace gapi {
namespace s11n {
struct IOStream;
struct IIStream;
namespace detail {
// Marker base: a type whose S11N is still the primary template below
// (i.e. not actually serializable) derives from this.
struct NotImplemented {
};
// The default S11N for custom types is NotImplemented
// Don't! subclass from NotImplemented if you actually implement S11N.
template<typename T>
struct S11N: public NotImplemented {
    // Fails at runtime: user code asked to serialize a type without an
    // S11N specialization.
    static void serialize(IOStream &, const T &) {
        GAPI_Assert(false && "No serialization routine is provided!");
    }
    // Fails at runtime: user code asked to deserialize a type without an
    // S11N specialization. (Never returns -- GAPI_Assert throws.)
    static T deserialize(IIStream &) {
        GAPI_Assert(false && "No deserialization routine is provided!");
    }
};
// Compile-time probe: true iff S11N<T> has been specialized (the
// specialization, unlike the primary template, must not derive from
// NotImplemented).
template<typename T> struct has_S11N_spec {
static constexpr bool value = !std::is_base_of<NotImplemented,
    S11N<typename std::decay<T>::type>>::value;
};
} // namespace detail
} // namespace s11n
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_S11N_BASE_HPP

View File

@ -21,9 +21,11 @@
* Note for developers: please don't put videoio dependency in G-API
* because of this file.
*/
#include <chrono>
#include <opencv2/videoio.hpp>
#include <opencv2/gapi/garg.hpp>
#include <opencv2/gapi/streaming/meta.hpp>
namespace cv {
namespace gapi {
@ -55,6 +57,7 @@ protected:
cv::VideoCapture cap;
cv::Mat first;
bool first_pulled = false;
int64_t counter = 0;
void prep()
{
@ -80,19 +83,26 @@ protected:
GAPI_Assert(!first.empty());
first_pulled = true;
data = first; // no need to clone here since it was cloned already
return true;
}
if (!cap.isOpened()) return false;
cv::Mat frame;
if (!cap.read(frame))
else
{
// end-of-stream happened
return false;
if (!cap.isOpened()) return false;
cv::Mat frame;
if (!cap.read(frame))
{
// end-of-stream happened
return false;
}
// Same reason to clone as in prep()
data = frame.clone();
}
// Same reason to clone as in prep()
data = frame.clone();
// Tag data with seq_id/ts
const auto now = std::chrono::system_clock::now();
const auto dur = std::chrono::duration_cast<std::chrono::microseconds>
(now.time_since_epoch());
data.meta[cv::gapi::streaming::meta_tag::timestamp] = int64_t{dur.count()};
data.meta[cv::gapi::streaming::meta_tag::seq_id] = int64_t{counter++};
return true;
}
@ -103,6 +113,12 @@ protected:
}
};
// NB: Overload for using from python
// Constructs a video-capture stream source for the given file path.
// Wraps make_src<GCaptureSource> so the Python bindings get a concrete,
// non-template entry point.
GAPI_EXPORTS_W cv::Ptr<IStreamSource> inline make_capture_src(const std::string& path)
{
    return make_src<GCaptureSource>(path);
}
} // namespace wip
} // namespace gapi
} // namespace cv

View File

@ -0,0 +1,84 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_GSTREAMING_DESYNC_HPP
#define OPENCV_GAPI_GSTREAMING_DESYNC_HPP
#include <tuple>
#include <opencv2/gapi/util/util.hpp>
#include <opencv2/gapi/gtype_traits.hpp>
#include <opencv2/gapi/garg.hpp>
#include <opencv2/gapi/gcall.hpp>
#include <opencv2/gapi/gkernel.hpp>
namespace cv {
namespace gapi {
namespace streaming {
namespace detail {
// Internal operation backing cv::gapi::streaming::desync(). It carries
// only an id and a generic yield; the kernel object itself is built
// ad-hoc in detail::desync() below.
struct GDesync {
    static const char *id() {
        return "org.opencv.streaming.desync";
    }
    // A universal yield for desync.
    // Yields output objects according to the input Types...
    // Reuses gkernel machinery.
    // FIXME: This function can be generic and declared in gkernel.hpp
    //        (it is there already, but a part of GKernelType[M]
    template<typename... R, int... IIs>
    static std::tuple<R...> yield(cv::GCall &call, cv::detail::Seq<IIs...>) {
        return std::make_tuple(cv::detail::Yield<R>::yield(call, IIs)...);
    }
};
// Generic implementation of desync: builds a one-in/one-out GDesync
// kernel ad-hoc for the G-API type G (the outMeta callback just forwards
// the input metas unchanged) and yields its single output.
template<typename G>
G desync(const G &g) {
    cv::GKernel k{
          GDesync::id()                                     // kernel id
        , ""                                                // kernel tag
        , [](const GMetaArgs &a, const GArgs &) {return a;} // outMeta callback
        , {cv::detail::GTypeTraits<G>::shape}               // output Shape
        , {cv::detail::GTypeTraits<G>::op_kind}             // input data kinds
        , {cv::detail::GObtainCtor<G>::get()}               // output template ctors
    };
    cv::GCall call(std::move(k));
    call.pass(g);
    // Single output -- MkSeq<1> yields exactly one object of type G
    return std::get<0>(GDesync::yield<G>(call, cv::detail::MkSeq<1>::type()));
}
} // namespace detail
/**
* @brief Starts a desynchronized branch in the graph.
*
* This operation takes a single G-API data object and returns a
* graph-level "duplicate" of this object.
*
* Operations which use this data object can be desynchronized
* from the rest of the graph.
*
* This operation has no effect when a GComputation is compiled with
* regular cv::GComputation::compile(), since cv::GCompiled objects
* always produce their full output vectors.
*
* This operation only makes sense when a GComputation is compiled in
 * streaming mode with cv::GComputation::compileStreaming(). If this
* operation is used and there are desynchronized outputs, the user
* should use a special version of cv::GStreamingCompiled::pull()
* which produces an array of cv::util::optional<> objects.
*
* @note This feature is highly experimental now and is currently
* limited to a single GMat argument only.
*/
GAPI_EXPORTS GMat desync(const GMat &g);
} // namespace streaming
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_GSTREAMING_DESYNC_HPP

View File

@ -0,0 +1,62 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_GSTREAMING_FORMAT_HPP
#define OPENCV_GAPI_GSTREAMING_FORMAT_HPP
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
namespace cv {
namespace gapi {
namespace streaming {
GAPI_EXPORTS cv::gapi::GKernelPackage kernels();
// Operation converting a media frame (GFrame) to a BGR image (GMat).
G_API_OP(GBGR, <GMat(GFrame)>, "org.opencv.streaming.BGR")
{
    // Output is an 8-bit 3-channel image of the same spatial size as the frame
    static GMatDesc outMeta(const GFrameDesc& in) { return GMatDesc{CV_8U, 3, in.size}; }
};
/** @brief Gets bgr plane from input frame
@note Function textual ID is "org.opencv.streaming.BGR"
@param in Input frame
@return Image in BGR format
*/
GAPI_EXPORTS cv::GMat BGR(const cv::GFrame& in);
} // namespace streaming
//! @addtogroup gapi_transform
//! @{
/** @brief Makes a copy of the input image. Note that this copy may be not real
(no actual data copied). Use this function to maintain graph contracts,
e.g when graph's input needs to be passed directly to output, like in Streaming mode.
@note Function textual ID is "org.opencv.streaming.copy"
@param in Input image
@return Copy of the input
*/
GAPI_EXPORTS GMat copy(const GMat& in);
/** @brief Makes a copy of the input frame. Note that this copy may be not real
(no actual data copied). Use this function to maintain graph contracts,
e.g when graph's input needs to be passed directly to output, like in Streaming mode.
@note Function textual ID is "org.opencv.streaming.copy"
@param in Input frame
@return Copy of the input
*/
GAPI_EXPORTS GFrame copy(const GFrame& in);
//! @} gapi_transform
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_GSTREAMING_FORMAT_HPP

View File

@ -0,0 +1,79 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_GSTREAMING_META_HPP
#define OPENCV_GAPI_GSTREAMING_META_HPP
#include <opencv2/gapi/gopaque.hpp>
#include <opencv2/gapi/gcall.hpp>
#include <opencv2/gapi/gkernel.hpp>
#include <opencv2/gapi/gtype_traits.hpp>
namespace cv {
namespace gapi {
namespace streaming {
// FIXME: the name is debatable
namespace meta_tag {
static constexpr const char * timestamp = "org.opencv.gapi.meta.timestamp";
static constexpr const char * seq_id = "org.opencv.gapi.meta.seq_id";
} // namespace meta_tag
namespace detail {
// Internal operation backing cv::gapi::streaming::meta(): extracts a
// metadata entry attached to a G-API data object at runtime.
struct GMeta {
    static const char *id() {
        return "org.opencv.streaming.meta";
    }
    // A universal yield for meta(), same as in GDesync
    template<typename... R, int... IIs>
    static std::tuple<R...> yield(cv::GCall &call, cv::detail::Seq<IIs...>) {
        return std::make_tuple(cv::detail::Yield<R>::yield(call, IIs)...);
    }
    // Also a universal outMeta stub here: forwards input metas unchanged
    static GMetaArgs getOutMeta(const GMetaArgs &args, const GArgs &) {
        return args;
    }
};
} // namespace detail
// Extracts metadata entry `tag` from a G-API object `g` as a GOpaque<T>.
// Builds a one-in/one-out GMeta kernel ad-hoc; the metadata tag is smuggled
// through the kernel's tag field.
template<typename T, typename G>
cv::GOpaque<T> meta(G g, const std::string &tag) {
    using O = cv::GOpaque<T>;
    cv::GKernel k{
          detail::GMeta::id()                 // kernel id
        , tag                                 // kernel tag. Use meta tag here
        , &detail::GMeta::getOutMeta          // outMeta callback
        , {cv::detail::GTypeTraits<O>::shape} // output Shape
        , {cv::detail::GTypeTraits<G>::op_kind} // input data kinds
        , {cv::detail::GObtainCtor<O>::get()} // output template ctors
    };
    cv::GCall call(std::move(k));
    call.pass(g);
    // Single output -- yield exactly one GOpaque<T>
    return std::get<0>(detail::GMeta::yield<O>(call, cv::detail::MkSeq<1>::type()));
}
// Extracts the capture timestamp (microseconds since epoch -- see the
// GCaptureSource tagging code) attached to `g`.
template<typename G>
cv::GOpaque<int64_t> timestamp(G g) {
    return meta<int64_t>(g, meta_tag::timestamp);
}
// Extracts the zero-based sequence number of the frame `g` belongs to.
template<typename G>
cv::GOpaque<int64_t> seq_id(G g) {
    return meta<int64_t>(g, meta_tag::seq_id);
}
// Deprecated alias for seq_id().
template<typename G>
cv::GOpaque<int64_t> seqNo(G g) {
    // Old name, compatibility only
    return seq_id(g);
}
} // namespace streaming
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_GSTREAMING_META_HPP

View File

@ -0,0 +1,34 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_UTIL_COPY_THROUGH_MOVE_HPP
#define OPENCV_GAPI_UTIL_COPY_THROUGH_MOVE_HPP
#include <opencv2/gapi/util/type_traits.hpp> //decay_t
namespace cv
{
namespace util
{
//This is a tool to move initialize captures of a lambda in C++11
template<typename T>
struct copy_through_move_t{
    T value;
    const T& get() const {return value;}
    T& get() {return value;}
    copy_through_move_t(T&& g) : value(std::move(g)) {}
    copy_through_move_t(copy_through_move_t&&) = default;
    // Intentional hack: the "copy" constructor actually MOVES from its
    // source (via const_cast), which lets move-only types be captured by
    // C++11 lambdas (which only support capture-by-copy). The source is
    // left in a moved-from state -- callers must not reuse it.
    copy_through_move_t(copy_through_move_t const& lhs) : copy_through_move_t(std::move(const_cast<copy_through_move_t&>(lhs))) {}
};
// Helper factory: wraps a value in copy_through_move_t, deducing the
// decayed type (returns by value; the wrapper's move/"copy" semantics
// carry the payload into a lambda capture).
template<typename T>
copy_through_move_t<util::decay_t<T>> copy_through_move(T&& t){
    return std::forward<T>(t);
}
} // namespace util
} // namespace cv
#endif /* OPENCV_GAPI_UTIL_COPY_THROUGH_MOVE_HPP */

View File

@ -35,9 +35,9 @@ namespace util
// instead {}
optional() {};
optional(const optional&) = default;
explicit optional(T &&value) noexcept;
explicit optional(const T &value) noexcept;
optional(optional &&) noexcept;
explicit optional(T&&) noexcept;
explicit optional(const T&) noexcept;
optional(optional&&) noexcept;
// TODO: optional(nullopt_t) noexcept;
// TODO: optional(const optional<U> &)
// TODO: optional(optional<U> &&)
@ -46,8 +46,8 @@ namespace util
// TODO: optional(U&& value);
// Assignment
optional& operator=(const optional& rhs) = default;
optional& operator=(optional&& rhs);
optional& operator=(const optional&) = default;
optional& operator=(optional&&);
// Observers
T* operator-> ();

View File

@ -16,6 +16,32 @@
*/
namespace cv { namespace gapi {
/** @brief Structure for the Kalman filter's initialization parameters.*/
struct GAPI_EXPORTS KalmanParams
{
    // initial state
    //! corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
    Mat state;
    //! posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
    Mat errorCov;
    // dynamic system description
    //! state transition matrix (A)
    Mat transitionMatrix;
    //! measurement matrix (H)
    Mat measurementMatrix;
    //! process noise covariance matrix (Q)
    Mat processNoiseCov;
    //! measurement noise covariance matrix (R)
    Mat measurementNoiseCov;
    //! control matrix (B) (Optional: not used if there's no control)
    Mat controlMatrix;
    // NOTE(review): mutual consistency of these matrices (depth, sizes)
    // appears to be validated by video::checkParams() at graph
    // meta-inference time -- confirm against its implementation.
};
namespace video
{
using GBuildPyrOutput = std::tuple<GArray<GMat>, GScalar>;
@ -62,6 +88,95 @@ G_TYPED_KERNEL(GCalcOptFlowLKForPyr,
return std::make_tuple(empty_array_desc(), empty_array_desc(), empty_array_desc());
}
};
// Selects which background-subtraction algorithm the kernel runs.
enum BackgroundSubtractorType
{
    TYPE_BS_MOG2, //!< Gaussian Mixture-based (MOG2) subtractor
    TYPE_BS_KNN   //!< K-nearest neighbours-based subtractor
};
/** @brief Structure for the Background Subtractor operation's initialization parameters.*/
struct BackgroundSubtractorParams
{
    //! Type of the Background Subtractor operation.
    BackgroundSubtractorType operation = TYPE_BS_MOG2;

    //! Length of the history.
    int history = 500;

    //! For MOG2: Threshold on the squared Mahalanobis distance between the pixel
    //! and the model to decide whether a pixel is well described by
    //! the background model.
    //! For KNN: Threshold on the squared distance between the pixel and the sample
    //! to decide whether a pixel is close to that sample.
    double threshold = 16;

    //! If true, the algorithm will detect shadows and mark them.
    bool detectShadows = true;

    //! The value between 0 and 1 that indicates how fast
    //! the background model is learnt.
    //! Negative parameter value makes the algorithm use some automatically
    //! chosen learning rate.
    double learningRate = -1;

    //! default constructor (keeps the member defaults above)
    BackgroundSubtractorParams() {}

    /** Full constructor
    @param op MOG2/KNN Background Subtractor type.
    @param histLength Length of the history.
    @param thrshld For MOG2: Threshold on the squared Mahalanobis distance between
    the pixel and the model to decide whether a pixel is well described by the background model.
    For KNN: Threshold on the squared distance between the pixel and the sample to decide
    whether a pixel is close to that sample.
    @param detect If true, the algorithm will detect shadows and mark them. It decreases the
    speed a bit, so if you do not need this feature, set the parameter to false.
    @param lRate The value between 0 and 1 that indicates how fast the background model is learnt.
    Negative parameter value makes the algorithm to use some automatically chosen learning rate.
    */
    BackgroundSubtractorParams(BackgroundSubtractorType op, int histLength,
                               double thrshld, bool detect, double lRate) : operation(op),
                                                                            history(histLength),
                                                                            threshold(thrshld),
                                                                            detectShadows(detect),
                                                                            learningRate(lRate){}
};
// Background subtraction operation: produces an 8-bit single-channel
// foreground mask for each input frame.
G_TYPED_KERNEL(GBackgroundSubtractor, <GMat(GMat, BackgroundSubtractorParams)>,
               "org.opencv.video.BackgroundSubtractor")
{
    static GMatDesc outMeta(const GMatDesc& in, const BackgroundSubtractorParams& bsParams)
    {
        // Sanity-check user parameters at graph meta-inference time;
        // learningRate may be negative (auto-chosen rate), so only the
        // upper bound is enforced.
        GAPI_Assert(bsParams.history >= 0);
        GAPI_Assert(bsParams.learningRate <= 1);
        return in.withType(CV_8U, 1);
    }
};
void checkParams(const cv::gapi::KalmanParams& kfParams,
const cv::GMatDesc& measurement, const cv::GMatDesc& control = {});
// Kalman filter operation (with control input). The GOpaque<bool> input
// tells at runtime whether a measurement is available on this iteration.
G_TYPED_KERNEL(GKalmanFilter, <GMat(GMat, GOpaque<bool>, GMat, KalmanParams)>,
               "org.opencv.video.KalmanFilter")
{
    static GMatDesc outMeta(const GMatDesc& measurement, const GOpaqueDesc&,
                            const GMatDesc& control, const KalmanParams& kfParams)
    {
        checkParams(kfParams, measurement, control);
        // Output is the state vector: one column, one row per state dimension
        return measurement.withSize(Size(1, kfParams.transitionMatrix.rows));
    }
};
// Kalman filter operation for systems without a control input
// (KalmanParams::controlMatrix is expected to stay empty).
G_TYPED_KERNEL(GKalmanFilterNoControl, <GMat(GMat, GOpaque<bool>, KalmanParams)>, "org.opencv.video.KalmanFilterNoControl")
{
    static GMatDesc outMeta(const GMatDesc& measurement, const GOpaqueDesc&, const KalmanParams& kfParams)
    {
        checkParams(kfParams, measurement);
        // Output is the state vector: one column, one row per state dimension
        return measurement.withSize(Size(1, kfParams.transitionMatrix.rows));
    }
};
} //namespace video
//! @addtogroup gapi_video
@ -83,8 +198,9 @@ G_TYPED_KERNEL(GCalcOptFlowLKForPyr,
@param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false
to force data copying.
@return output pyramid.
@return number of levels in constructed pyramid. Can be less than maxLevel.
@return
- output pyramid.
- number of levels in constructed pyramid. Can be less than maxLevel.
*/
GAPI_EXPORTS std::tuple<GArray<GMat>, GScalar>
buildOpticalFlowPyramid(const GMat &img,
@ -131,11 +247,12 @@ by number of pixels in a window; if this value is less than minEigThreshold, the
feature is filtered out and its flow is not processed, so it allows to remove bad points and get a
performance boost.
@return GArray of 2D points (with single-precision floating-point coordinates)
@return
- GArray of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image.
@return status GArray (of unsigned chars); each element of the vector is set to 1 if
- status GArray (of unsigned chars); each element of the vector is set to 1 if
the flow for the corresponding features has been found, otherwise, it is set to 0.
@return GArray of errors (doubles); each element of the vector is set to an error for the
- GArray of errors (doubles); each element of the vector is set to an error for the
corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
found then the error is not defined (use the status parameter to find such cases).
*/
@ -169,8 +286,75 @@ calcOpticalFlowPyrLK(const GArray<GMat> &prevPyr,
int flags = 0,
double minEigThresh = 1e-4);
/** @brief Gaussian Mixture-based or K-nearest neighbours-based Background/Foreground Segmentation Algorithm.
The operation generates a foreground mask.
@return Output image is foreground mask, i.e. 8-bit unsigned 1-channel (binary) matrix @ref CV_8UC1.
@note Functional textual ID is "org.opencv.video.BackgroundSubtractor"
@param src input image: Floating point frame is used without scaling and should be in range [0,255].
@param bsParams Set of initialization parameters for Background Subtractor kernel.
*/
GAPI_EXPORTS GMat BackgroundSubtractor(const GMat& src, const cv::gapi::video::BackgroundSubtractorParams& bsParams);
/** @brief Standard Kalman filter algorithm <http://en.wikipedia.org/wiki/Kalman_filter>.
@note Functional textual ID is "org.opencv.video.KalmanFilter"
@param measurement input matrix: 32-bit or 64-bit float 1-channel matrix containing measurements.
@param haveMeasurement dynamic input flag that indicates whether we get measurements
at a particular iteration.
@param control input matrix: 32-bit or 64-bit float 1-channel matrix contains control data
for changing dynamic system.
@param kfParams Set of initialization parameters for Kalman filter kernel.
@return Output matrix is predicted or corrected state. They can be 32-bit or 64-bit float
1-channel matrix @ref CV_32FC1 or @ref CV_64FC1.
@details If measurement matrix is given (haveMeasurements == true), corrected state will
be returned which corresponds to the pipeline
cv::KalmanFilter::predict(control) -> cv::KalmanFilter::correct(measurement).
Otherwise, predicted state will be returned which corresponds to the call of
cv::KalmanFilter::predict(control).
@sa cv::KalmanFilter
*/
GAPI_EXPORTS GMat KalmanFilter(const GMat& measurement, const GOpaque<bool>& haveMeasurement,
const GMat& control, const cv::gapi::KalmanParams& kfParams);
/** @overload
The case of Standard Kalman filter algorithm when there is no control in a dynamic system.
In this case the controlMatrix is empty and control vector is absent.
@note Function textual ID is "org.opencv.video.KalmanFilterNoControl"
@param measurement input matrix: 32-bit or 64-bit float 1-channel matrix containing measurements.
@param haveMeasurement dynamic input flag that indicates whether we get measurements
at a particular iteration.
@param kfParams Set of initialization parameters for Kalman filter kernel.
@return Output matrix is predicted or corrected state. They can be 32-bit or 64-bit float
1-channel matrix @ref CV_32FC1 or @ref CV_64FC1.
@sa cv::KalmanFilter
*/
GAPI_EXPORTS GMat KalmanFilter(const GMat& measurement, const GOpaque<bool>& haveMeasurement,
const cv::gapi::KalmanParams& kfParams);
//! @} gapi_video
} //namespace gapi
} //namespace cv
namespace cv { namespace detail {
// Registers BackgroundSubtractorParams as a G-API compile argument.
template<> struct CompileArgTag<cv::gapi::video::BackgroundSubtractorParams>
{
    static const char* tag()
    {
        // NOTE(review): "substractor" is misspelled, but this string is a
        // persisted identifier -- changing it would break compatibility
        // with existing serialized data; keep as-is.
        return "org.opencv.video.background_substractor_params";
    }
};
} // namespace detail
} // namespace cv
#endif // OPENCV_GAPI_VIDEO_HPP

View File

@ -1,4 +1,18 @@
#ifndef OPENCV_GAPI_PYOPENCV_GAPI_HPP
#define OPENCV_GAPI_PYOPENCV_GAPI_HPP
#ifdef HAVE_OPENCV_GAPI
// NB: Python wrapper replaces :: with _ for classes
using gapi_GKernelPackage = cv::gapi::GKernelPackage;
using gapi_GNetPackage = cv::gapi::GNetPackage;
using gapi_ie_PyParams = cv::gapi::ie::PyParams;
using gapi_wip_IStreamSource_Ptr = cv::Ptr<cv::gapi::wip::IStreamSource>;
// FIXME: Python wrapper generate code without namespace std,
// so it cause error: "string wasn't declared"
// WA: Create using
using std::string;
template<>
bool pyopencv_to(PyObject* obj, std::vector<GCompileArg>& value, const ArgInfo& info)
@ -12,6 +26,90 @@ PyObject* pyopencv_from(const std::vector<GCompileArg>& value)
return pyopencv_from_generic_vec(value);
}
template<>
bool pyopencv_to(PyObject* obj, GRunArgs& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
// Converts one GRunArg to a Python object via pyopencv_from.
// Supported alternatives: cv::Mat, cv::Scalar, and GArray<Point2f>
// (via VectorRef). Returns a new reference, or NULL with a Python
// exception set for unsupported alternatives.
static PyObject* from_grunarg(const GRunArg& v)
{
    switch (v.index())
    {
        case GRunArg::index_of<cv::Mat>():
        {
            const auto& m = util::get<cv::Mat>(v);
            return pyopencv_from(m);
        }
        case GRunArg::index_of<cv::Scalar>():
        {
            const auto& s = util::get<cv::Scalar>(v);
            return pyopencv_from(s);
        }
        case GRunArg::index_of<cv::detail::VectorRef>():
        {
            const auto& vref = util::get<cv::detail::VectorRef>(v);
            switch (vref.getKind())
            {
                case cv::detail::OpaqueKind::CV_POINT2F:
                    return pyopencv_from(vref.rref<cv::Point2f>());
                default:
                    // Only GArray<Point2f> is exposed to Python so far
                    PyErr_SetString(PyExc_TypeError, "Unsupported kind for GArray");
                    return NULL;
            }
        }
        default:
            PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
            return NULL;
    }
    GAPI_Assert(false); // unreachable: every switch branch returns
}
// Converts a GRunArgs vector to Python: a bare object for a single
// result, a list for multiple. Returns a new reference, or NULL with a
// Python exception set on failure.
// (Fix: the result of PyList_New() was used without a NULL check --
// PyList_SetItem would have crashed on allocation failure.)
template<>
PyObject* pyopencv_from(const GRunArgs& value)
{
    const size_t n = value.size();

    // NB: It doesn't make sense to return list with a single element
    if (n == 1)
    {
        return from_grunarg(value[0]); // NULL on failure, exception already set
    }

    PyObject* list = PyList_New(n);
    if (!list)
    {
        return NULL; // allocation failed; PyList_New set the exception
    }
    for (size_t i = 0; i < n; ++i)
    {
        PyObject* item = from_grunarg(value[i]);
        if (!item)
        {
            Py_DECREF(list);
            PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
            return NULL;
        }
        PyList_SetItem(list, i, item); // steals the reference to item
    }
    return list;
}
template<>
bool pyopencv_to(PyObject* obj, GMetaArgs& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
template<>
PyObject* pyopencv_from(const GMetaArgs& value)
{
return pyopencv_from_generic_vec(value);
}
template <typename T>
static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw)
{
@ -19,14 +117,24 @@ static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw)
GProtoArgs args;
Py_ssize_t size = PyTuple_Size(py_args);
for (int i = 0; i < size; ++i) {
for (int i = 0; i < size; ++i)
{
PyObject* item = PyTuple_GetItem(py_args, i);
if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GScalar_TypePtr))) {
if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GScalar_TypePtr)))
{
args.emplace_back(reinterpret_cast<pyopencv_GScalar_t*>(item)->v);
} else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GMat_TypePtr))) {
}
else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GMat_TypePtr)))
{
args.emplace_back(reinterpret_cast<pyopencv_GMat_t*>(item)->v);
} else {
PyErr_SetString(PyExc_TypeError, "cv.GIn() supports only cv.GMat and cv.GScalar");
}
else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GArrayP2f_TypePtr)))
{
args.emplace_back(reinterpret_cast<pyopencv_GArrayP2f_t*>(item)->v.strip());
}
else
{
PyErr_SetString(PyExc_TypeError, "Unsupported type for cv.GIn()/cv.GOut()");
return NULL;
}
}
@ -43,3 +151,64 @@ static PyObject* pyopencv_cv_GOut(PyObject* , PyObject* py_args, PyObject* kw)
{
return extract_proto_args<GProtoOutputArgs>(py_args, kw);
}
// Implements cv.gin(...): packs positional Python arguments into a
// GRunArgs vector. Accepts tuples (-> cv::Scalar), numpy arrays
// (-> cv::Mat), and wrapped IStreamSource objects; anything else raises
// TypeError. Returns a Python list of the packed arguments.
static PyObject* pyopencv_cv_gin(PyObject* , PyObject* py_args, PyObject* kw)
{
    using namespace cv;

    GRunArgs args;
    Py_ssize_t size = PyTuple_Size(py_args);
    for (int i = 0; i < size; ++i)
    {
        PyObject* item = PyTuple_GetItem(py_args, i);
        if (PyTuple_Check(item))
        {
            // Python tuples map to cv::Scalar
            cv::Scalar s;
            if (pyopencv_to(item, s, ArgInfo("scalar", false)))
            {
                args.emplace_back(s);
            }
            else
            {
                PyErr_SetString(PyExc_TypeError, "Failed convert tuple to cv::Scalar");
                return NULL;
            }
        }
        else if (PyArray_Check(item))
        {
            // numpy arrays map to cv::Mat
            cv::Mat m;
            if (pyopencv_to(item, m, ArgInfo("mat", false)))
            {
                args.emplace_back(m);
            }
            else
            {
                PyErr_SetString(PyExc_TypeError, "Failed convert array to cv::Mat");
                return NULL;
            }
        }
        else if (PyObject_TypeCheck(item,
                     reinterpret_cast<PyTypeObject*>(pyopencv_gapi_wip_IStreamSource_TypePtr)))
        {
            // Wrapped stream sources are passed through as-is
            cv::gapi::wip::IStreamSource::Ptr source =
                reinterpret_cast<pyopencv_gapi_wip_IStreamSource_t*>(item)->v;
            args.emplace_back(source);
        }
        else
        {
            PyErr_SetString(PyExc_TypeError, "cv.gin can works only with cv::Mat,"
                                             "cv::Scalar, cv::gapi::wip::IStreamSource::Ptr");
            return NULL;
        }
    }

    return pyopencv_from_generic_vec(args);
}
// Implements cv.gout(...): accepts the same argument set as cv.gin(...),
// so it simply delegates to it.
static PyObject* pyopencv_cv_gout(PyObject* o, PyObject* py_args, PyObject* kw)
{
    return pyopencv_cv_gin(o, py_args, kw);
}
#endif // HAVE_OPENCV_GAPI
#endif // OPENCV_GAPI_PYOPENCV_GAPI_HPP

View File

@ -3,11 +3,30 @@
namespace cv
{
struct GAPI_EXPORTS_W_SIMPLE GCompileArg { };
GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg);
GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GNetPackage pkg);
// NB: This classes doesn't exist in *.so
// HACK: Mark them as a class to force python wrapper generate code for this entities
class GAPI_EXPORTS_W_SIMPLE GProtoArg { };
class GAPI_EXPORTS_W_SIMPLE GProtoInputArgs { };
class GAPI_EXPORTS_W_SIMPLE GProtoOutputArgs { };
class GAPI_EXPORTS_W_SIMPLE GRunArg { };
class GAPI_EXPORTS_W_SIMPLE GMetaArg { };
class GAPI_EXPORTS_W_SIMPLE GArrayP2f { };
using GProtoInputArgs = GIOProtoArgs<In_Tag>;
using GProtoOutputArgs = GIOProtoArgs<Out_Tag>;
namespace gapi
{
GAPI_EXPORTS_W gapi::GNetPackage networks(const cv::gapi::ie::PyParams& params);
namespace wip
{
class GAPI_EXPORTS_W IStreamSource { };
} // namespace wip
} // namespace gapi
} // namespace cv

View File

@ -2,29 +2,30 @@
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
# Plaidml is an optional backend
pkgs = [
cv.gapi.core.ocl.kernels(),
cv.gapi.core.cpu.kernels(),
cv.gapi.core.fluid.kernels()
# cv.gapi.core.plaidml.kernels()
]
('ocl' , cv.gapi.core.ocl.kernels()),
('cpu' , cv.gapi.core.cpu.kernels()),
('fluid' , cv.gapi.core.fluid.kernels())
# ('plaidml', cv.gapi.core.plaidml.kernels())
]
class gapi_core_test(NewOpenCVTests):
def test_add(self):
# TODO: Extend to use any type and size here
sz = (1280, 720)
in1 = np.random.randint(0, 100, sz).astype(np.uint8)
in2 = np.random.randint(0, 100, sz).astype(np.uint8)
sz = (720, 1280)
in1 = np.full(sz, 100)
in2 = np.full(sz, 50)
# OpenCV
expected = in1 + in2
expected = cv.add(in1, in2)
# G-API
g_in1 = cv.GMat()
@ -32,15 +33,39 @@ class gapi_core_test(NewOpenCVTests):
g_out = cv.gapi.add(g_in1, g_in2)
comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
for pkg in pkgs:
actual = comp.apply(in1, in2, args=cv.compile_args(pkg))
for pkg_name, pkg in pkgs:
actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg))
# Comparison
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
'Failed on ' + pkg_name + ' backend')
self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend')
def test_add_uint8(self):
    # Saturated add of two constant uint8 images: the G-API result must
    # match cv.add() bit-exactly (values and dtype) on every backend.
    shape = (720, 1280)
    lhs = np.full(shape, 100, dtype=np.uint8)
    rhs = np.full(shape, 50 , dtype=np.uint8)

    # Reference computed with plain OpenCV.
    reference = cv.add(lhs, rhs)

    # Equivalent G-API graph.
    g_a = cv.GMat()
    g_b = cv.GMat()
    g_sum = cv.gapi.add(g_a, g_b)
    graph = cv.GComputation(cv.GIn(g_a, g_b), cv.GOut(g_sum))

    for pkg_name, pkg in pkgs:
        produced = graph.apply(cv.gin(lhs, rhs), args=cv.compile_args(pkg))
        msg = 'Failed on ' + pkg_name + ' backend'
        self.assertEqual(0.0, cv.norm(reference, produced, cv.NORM_INF), msg)
        self.assertEqual(reference.dtype, produced.dtype, msg)
def test_mean(self):
sz = (1280, 720, 3)
in_mat = np.random.randint(0, 100, sz).astype(np.uint8)
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
in_mat = cv.imread(img_path)
# OpenCV
expected = cv.mean(in_mat)
@ -50,10 +75,57 @@ class gapi_core_test(NewOpenCVTests):
g_out = cv.gapi.mean(g_in)
comp = cv.GComputation(g_in, g_out)
for pkg in pkgs:
actual = comp.apply(in_mat, args=cv.compile_args(pkg))
for pkg_name, pkg in pkgs:
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
# Comparison
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
'Failed on ' + pkg_name + ' backend')
def test_split3(self):
    # Split a 3-channel test image into planes and compare G-API vs. OpenCV.
    img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
    frame = cv.imread(img_path)

    # Reference result from plain OpenCV.
    reference = cv.split(frame)

    # Equivalent G-API graph: one input, three plane outputs.
    g_input = cv.GMat()
    ch_b, ch_g, ch_r = cv.gapi.split3(g_input)
    graph = cv.GComputation(cv.GIn(g_input), cv.GOut(ch_b, ch_g, ch_r))

    for pkg_name, pkg in pkgs:
        planes = graph.apply(cv.gin(frame), args=cv.compile_args(pkg))
        msg = 'Failed on ' + pkg_name + ' backend'
        # Every plane must match the reference bit-exactly, dtype included.
        for ref_plane, out_plane in zip(reference, planes):
            self.assertEqual(0.0, cv.norm(ref_plane, out_plane, cv.NORM_INF), msg)
            self.assertEqual(ref_plane.dtype, out_plane.dtype, msg)
def test_threshold(self):
    # Triangle-threshold a grayscale image; both the output image and the
    # computed threshold value must match the OpenCV reference.
    img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
    gray = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
    maxv = (30, 30)

    # Reference computed with plain OpenCV.
    ref_thresh, ref_mat = cv.threshold(gray, maxv[0], maxv[0], cv.THRESH_TRIANGLE)

    # Equivalent G-API graph: the threshold arrives as a scalar input.
    g_img = cv.GMat()
    g_thr = cv.GScalar()
    out_mat, out_thr = cv.gapi.threshold(g_img, g_thr, cv.THRESH_TRIANGLE)
    graph = cv.GComputation(cv.GIn(g_img, g_thr), cv.GOut(out_mat, out_thr))

    for pkg_name, pkg in pkgs:
        got_mat, got_thresh = graph.apply(cv.gin(gray, maxv), args=cv.compile_args(pkg))
        msg = 'Failed on ' + pkg_name + ' backend'
        self.assertEqual(0.0, cv.norm(ref_mat, got_mat, cv.NORM_INF), msg)
        self.assertEqual(ref_mat.dtype, got_mat.dtype, msg)
        # G-API returns the threshold as a scalar tuple; compare component 0.
        self.assertEqual(ref_thresh, got_thresh[0], msg)
if __name__ == '__main__':

View File

@ -0,0 +1,79 @@
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
# Plaidml is an optional backend
pkgs = [
('ocl' , cv.gapi.core.ocl.kernels()),
('cpu' , cv.gapi.core.cpu.kernels()),
('fluid' , cv.gapi.core.fluid.kernels())
# ('plaidml', cv.gapi.core.plaidml.kernels())
]
class gapi_imgproc_test(NewOpenCVTests):
def test_good_features_to_track(self):
    # TODO: Extend to use any type and size here
    img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
    gray = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)

    # goodFeaturesToTrack configuration shared by both implementations.
    max_corners = 50
    quality_lvl = 0.01
    min_distance = 10
    block_sz = 3
    use_harris_detector = True
    k = 0.04
    mask = None

    # Reference computed with plain OpenCV.
    reference = cv.goodFeaturesToTrack(gray, max_corners, quality_lvl,
                                       min_distance, mask=mask,
                                       blockSize=block_sz,
                                       useHarrisDetector=use_harris_detector, k=k)

    # Equivalent G-API graph.
    g_input = cv.GMat()
    g_corners = cv.gapi.goodFeaturesToTrack(g_input, max_corners, quality_lvl,
                                            min_distance, mask, block_sz,
                                            use_harris_detector, k)
    graph = cv.GComputation(cv.GIn(g_input), cv.GOut(g_corners))

    for pkg_name, pkg in pkgs:
        produced = graph.apply(cv.gin(gray), args=cv.compile_args(pkg))
        # NB: OpenCV & G-API have different output shapes:
        #     OpenCV - (num_points, 1, 2)
        #     G-API  - (num_points, 2)
        # so flatten both before comparing.
        self.assertEqual(0.0,
                         cv.norm(reference.flatten(), produced.flatten(), cv.NORM_INF),
                         'Failed on ' + pkg_name + ' backend')
def test_rgb2gray(self):
    # TODO: Extend to use any type and size here
    img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
    frame = cv.imread(img_path)

    # Reference computed with plain OpenCV.
    reference = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)

    # Equivalent G-API graph.
    g_input = cv.GMat()
    g_gray = cv.gapi.RGB2Gray(g_input)
    graph = cv.GComputation(cv.GIn(g_input), cv.GOut(g_gray))

    for pkg_name, pkg in pkgs:
        produced = graph.apply(cv.gin(frame), args=cv.compile_args(pkg))
        self.assertEqual(0.0, cv.norm(reference, produced, cv.NORM_INF),
                         'Failed on ' + pkg_name + ' backend')
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -0,0 +1,62 @@
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
class test_gapi_infer(NewOpenCVTests):
def test_getAvailableTargets(self):
    # The OpenCV DNN backend must always expose at least the CPU target.
    available = cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_OPENCV)
    self.assertIn(cv.dnn.DNN_TARGET_CPU, available)
def test_age_gender_infer(self):
    # Compares G-API IE-backend inference against OpenCV DNN on the
    # age-gender-recognition-retail-0013 model (two-output topology).
    # NB: Check IE -- silently skip when no Inference Engine CPU target.
    if cv.dnn.DNN_TARGET_CPU not in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
        return

    root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
    model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
    weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
    img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
    device_id = 'CPU'
    # Resize to the model's input resolution.
    img = cv.resize(cv.imread(img_path), (62, 62))

    # OpenCV DNN reference.
    net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
    blob = cv.dnn.blobFromImage(img)
    net.setInput(blob)
    dnn_age, dnn_gender = net.forward(net.getUnconnectedOutLayersNames())

    # OpenCV G-API graph performing the same inference.
    g_in = cv.GMat()
    inputs = cv.GInferInputs()
    inputs.setInput('data', g_in)
    outputs = cv.gapi.infer("net", inputs)
    age_g = outputs.at("age_conv3")
    gender_g = outputs.at("prob")
    comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g))
    pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
    nets = cv.gapi.networks(pp)
    args = cv.compile_args(nets)
    # BUGFIX: `args` was built and then ignored while apply() rebuilt the
    # same compile args from scratch -- reuse the prepared value instead.
    gapi_age, gapi_gender = comp.apply(cv.gin(img), args=args)

    # Check
    self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
    self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -2,25 +2,26 @@
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
# Plaidml is an optional backend
pkgs = [
cv.gapi.core.ocl.kernels(),
cv.gapi.core.cpu.kernels(),
cv.gapi.core.fluid.kernels()
# cv.gapi.core.plaidml.kernels()
]
('ocl' , cv.gapi.core.ocl.kernels()),
('cpu' , cv.gapi.core.cpu.kernels()),
('fluid' , cv.gapi.core.fluid.kernels())
# ('plaidml', cv.gapi.core.plaidml.kernels())
]
class gapi_sample_pipelines(NewOpenCVTests):
# NB: This test check multiple outputs for operation
def test_mean_over_r(self):
sz = (100, 100, 3)
in_mat = np.random.randint(0, 100, sz).astype(np.uint8)
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
in_mat = cv.imread(img_path)
# # OpenCV
_, _, r_ch = cv.split(in_mat)
@ -32,10 +33,11 @@ class gapi_sample_pipelines(NewOpenCVTests):
g_out = cv.gapi.mean(r)
comp = cv.GComputation(g_in, g_out)
for pkg in pkgs:
actual = comp.apply(in_mat, args=cv.compile_args(pkg))
for pkg_name, pkg in pkgs:
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
# Comparison
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
'Failed on ' + pkg_name + ' backend')
if __name__ == '__main__':

View File

@ -0,0 +1,202 @@
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
class test_gapi_streaming(NewOpenCVTests):
def test_image_input(self):
    # A single image pushed through a streaming-compiled graph: one pull()
    # must yield exactly what the regular OpenCV call produces.
    shape = (1280, 720)
    frame = np.random.randint(0, 100, shape).astype(np.uint8)

    reference = cv.medianBlur(frame, 3)

    g_input = cv.GMat()
    g_output = cv.gapi.medianBlur(g_input, 3)
    graph = cv.GComputation(g_input, g_output)
    streaming = graph.compileStreaming(cv.descr_of(cv.gin(frame)))
    streaming.setSource(cv.gin(frame))
    streaming.start()

    _, produced = streaming.pull()
    self.assertEqual(0.0, cv.norm(reference, produced, cv.NORM_INF))
def test_video_input(self):
    # Stream a video through gapi.medianBlur and compare every frame with
    # cv.medianBlur applied to the same capture.
    ksize = 3
    path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

    # Reference decoder.
    cap = cv.VideoCapture(path)

    # Streaming G-API pipeline over the same file.
    g_input = cv.GMat()
    g_output = cv.gapi.medianBlur(g_input, ksize)
    streaming = cv.GComputation(g_input, g_output).compileStreaming()
    streaming.setSource(cv.gapi.wip.make_capture_src(path))
    streaming.start()

    # Compare at most max_num_frames frames.
    max_num_frames = 10
    frames_done = 0
    while cap.isOpened():
        has_expected, frame = cap.read()
        has_actual, produced = streaming.pull()
        # Both sources must agree on end-of-stream.
        self.assertEqual(has_expected, has_actual)
        if not has_actual:
            break
        self.assertEqual(0.0, cv.norm(cv.medianBlur(frame, ksize), produced, cv.NORM_INF))
        frames_done += 1
        if frames_done == max_num_frames:
            break
def test_video_split3(self):
    # Per-frame channel split: G-API streaming output vs cv.split().
    path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

    # Reference decoder.
    cap = cv.VideoCapture(path)

    # Streaming G-API pipeline: one input, three plane outputs.
    g_input = cv.GMat()
    ch_b, ch_g, ch_r = cv.gapi.split3(g_input)
    streaming = cv.GComputation(cv.GIn(g_input),
                                cv.GOut(ch_b, ch_g, ch_r)).compileStreaming()
    streaming.setSource(cv.gapi.wip.make_capture_src(path))
    streaming.start()

    # Compare at most max_num_frames frames.
    max_num_frames = 10
    frames_done = 0
    while cap.isOpened():
        has_expected, frame = cap.read()
        has_actual, produced = streaming.pull()
        self.assertEqual(has_expected, has_actual)
        if not has_actual:
            break
        for ref_plane, out_plane in zip(cv.split(frame), produced):
            self.assertEqual(0.0, cv.norm(ref_plane, out_plane, cv.NORM_INF))
        frames_done += 1
        if frames_done == max_num_frames:
            break
def test_video_add(self):
    # Adds a fixed random image to every decoded frame inside the stream.
    shape = (576, 768, 3)
    const_img = np.random.randint(0, 100, shape).astype(np.uint8)
    path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

    # Reference decoder.
    cap = cv.VideoCapture(path)

    # Streaming G-API pipeline; first input is the video source, second is
    # the constant image.
    g_a = cv.GMat()
    g_b = cv.GMat()
    g_sum = cv.gapi.add(g_a, g_b)
    streaming = cv.GComputation(cv.GIn(g_a, g_b), cv.GOut(g_sum)).compileStreaming()
    streaming.setSource(cv.gin(cv.gapi.wip.make_capture_src(path), const_img))
    streaming.start()

    # Compare at most max_num_frames frames.
    max_num_frames = 10
    frames_done = 0
    while cap.isOpened():
        has_expected, frame = cap.read()
        has_actual, produced = streaming.pull()
        self.assertEqual(has_expected, has_actual)
        if not has_actual:
            break
        self.assertEqual(0.0, cv.norm(cv.add(frame, const_img), produced, cv.NORM_INF))
        frames_done += 1
        if frames_done == max_num_frames:
            break
def test_video_good_features_to_track(self):
    # Streams a video through RGB2Gray + goodFeaturesToTrack and checks
    # every frame against the plain OpenCV pipeline.
    path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

    # goodFeaturesToTrack configuration shared by both implementations.
    max_corners = 50
    quality_lvl = 0.01
    min_distance = 10
    block_sz = 3
    use_harris_detector = True
    k = 0.04
    mask = None

    # Reference decoder.
    cap = cv.VideoCapture(path)

    # Streaming G-API pipeline.
    g_input = cv.GMat()
    g_gray = cv.gapi.RGB2Gray(g_input)
    g_corners = cv.gapi.goodFeaturesToTrack(g_gray, max_corners, quality_lvl,
                                            min_distance, mask, block_sz,
                                            use_harris_detector, k)
    streaming = cv.GComputation(cv.GIn(g_input), cv.GOut(g_corners)).compileStreaming()
    streaming.setSource(cv.gapi.wip.make_capture_src(path))
    streaming.start()

    # Compare at most max_num_frames frames.
    max_num_frames = 10
    frames_done = 0
    while cap.isOpened():
        has_expected, frame = cap.read()
        has_actual, produced = streaming.pull()
        self.assertEqual(has_expected, has_actual)
        if not has_actual:
            break
        gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
        reference = cv.goodFeaturesToTrack(gray, max_corners, quality_lvl,
                                           min_distance, mask=mask,
                                           blockSize=block_sz,
                                           useHarrisDetector=use_harris_detector, k=k)
        for ref_pt, out_pt in zip(reference, produced):
            # NB: OpenCV & G-API have different output shapes:
            #     OpenCV - (num_points, 1, 2)
            #     G-API  - (num_points, 2)
            self.assertEqual(0.0, cv.norm(ref_pt.flatten(), out_pt.flatten(), cv.NORM_INF))
        frames_done += 1
        if frames_done == max_num_frames:
            break
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#ifndef OPENCV_GAPI_CORE_PERF_TESTS_HPP
@ -52,6 +52,7 @@ namespace opencv_test
class AbsDiffPerfTest : public TestPerfParams<tuple<cv::Size, MatType, cv::GCompileArgs>> {};
class AbsDiffCPerfTest : public TestPerfParams<tuple<cv::Size, MatType, cv::GCompileArgs>> {};
class SumPerfTest : public TestPerfParams<tuple<compare_scalar_f, cv::Size, MatType, cv::GCompileArgs>> {};
class CountNonZeroPerfTest : public TestPerfParams<tuple<compare_scalar_f, cv::Size, MatType, cv::GCompileArgs>> {};
class AddWeightedPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, MatType, int, cv::GCompileArgs>> {};
class NormPerfTest : public TestPerfParams<tuple<compare_scalar_f, NormTypes, cv::Size, MatType, cv::GCompileArgs>> {};
class IntegralPerfTest : public TestPerfParams<tuple<cv::Size, MatType, cv::GCompileArgs>> {};

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#ifndef OPENCV_GAPI_CORE_PERF_TESTS_INL_HPP
@ -1011,6 +1011,44 @@ PERF_TEST_P_(SumPerfTest, TestPerformance)
SANITY_CHECK_NOTHING();
}
//------------------------------------------------------------------------------
// Benchmarks cv::gapi::countNonZero() and validates it against the reference
// cv::countNonZero() result on the same random input.
// NB: push/undef shields the identifier in case `countNonZero` is defined as
// a macro in this translation unit; the macro is restored after the test.
#pragma push_macro("countNonZero")
#undef countNonZero
PERF_TEST_P_(CountNonZeroPerfTest, TestPerformance)
{
compare_scalar_f cmpF;          // tolerance-based comparator for the counts
cv::Size sz_in;                 // input matrix size
MatType type = -1;              // input matrix type
cv::GCompileArgs compile_args;  // backend selection for the G-API graph
std::tie(cmpF, sz_in, type, compile_args) = GetParam();
initMatrixRandU(type, sz_in, type, false);  // fills fixture input in_mat1
int out_cnz_gapi, out_cnz_ocv;
// OpenCV code ///////////////////////////////////////////////////////////
out_cnz_ocv = cv::countNonZero(in_mat1);
// G-API code ////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::countNonZero(in);
cv::GComputation c(cv::GIn(in), cv::GOut(out));
// Warm-up graph engine:
c.apply(cv::gin(in_mat1), cv::gout(out_cnz_gapi), std::move(compile_args));
TEST_CYCLE()
{
c.apply(cv::gin(in_mat1), cv::gout(out_cnz_gapi));
}
// Comparison ////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_cnz_gapi, out_cnz_ocv));
}
SANITY_CHECK_NOTHING();
}
#pragma pop_macro("countNonZero")
//------------------------------------------------------------------------------
PERF_TEST_P_(AddWeightedPerfTest, TestPerformance)
@ -2086,7 +2124,7 @@ PERF_TEST_P_(SizePerfTest, TestPerformance)
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::size(in);
auto out = cv::gapi::streaming::size(in);
cv::GComputation c(cv::GIn(in), cv::GOut(out));
cv::Size out_sz;
@ -2118,7 +2156,7 @@ PERF_TEST_P_(SizeRPerfTest, TestPerformance)
// G-API code //////////////////////////////////////////////////////////////
cv::GOpaque<cv::Rect> op_rect;
auto out = cv::gapi::size(op_rect);
auto out = cv::gapi::streaming::size(op_rect);
cv::GComputation c(cv::GIn(op_rect), cv::GOut(out));
cv::Size out_sz;

View File

@ -42,10 +42,15 @@ class GoodFeaturesPerfTest : public TestPerfParams<tuple<compare_vector_f<cv:
int,int,double,double,int,bool,
cv::GCompileArgs>> {};
class EqHistPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class BGR2RGBPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class RGB2GrayPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class BGR2GrayPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class RGB2YUVPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class YUV2RGBPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class BGR2I420PerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class RGB2I420PerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class I4202BGRPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class I4202RGBPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class RGB2LabPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class BGR2LUVPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};
class LUV2BGRPerfTest : public TestPerfParams<tuple<compare_f, cv::Size, cv::GCompileArgs>> {};

View File

@ -788,6 +788,44 @@ PERF_TEST_P_(EqHistPerfTest, TestPerformance)
//------------------------------------------------------------------------------
// Benchmarks cv::gapi::BGR2RGB() and validates it against cv::cvtColor
// with COLOR_BGR2RGB on the same random 8UC3 input.
PERF_TEST_P_(BGR2RGBPerfTest, TestPerformance)
{
compare_f cmpF;                 // per-pixel comparator
cv::Size sz;                    // input/output size
cv::GCompileArgs compile_args;  // backend selection for the G-API graph
std::tie(cmpF, sz, compile_args) = GetParam();
initMatrixRandN(CV_8UC3, sz, CV_8UC3, false);  // fills in_mat1/out_mat_ocv
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2RGB);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::BGR2RGB(in);
cv::GComputation c(in, out);
// Warm-up graph engine:
c.apply(in_mat1, out_mat_gapi, std::move(compile_args));
TEST_CYCLE()
{
c.apply(in_mat1, out_mat_gapi);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
EXPECT_EQ(out_mat_gapi.size(), sz);  // channel swap keeps the geometry
}
SANITY_CHECK_NOTHING();
}
//------------------------------------------------------------------------------
PERF_TEST_P_(RGB2GrayPerfTest, TestPerformance)
{
compare_f cmpF = get<0>(GetParam());
@ -940,6 +978,158 @@ PERF_TEST_P_(YUV2RGBPerfTest, TestPerformance)
//------------------------------------------------------------------------------
// Benchmarks cv::gapi::BGR2I420() and validates it against cv::cvtColor
// with COLOR_BGR2YUV_I420 on the same random 8UC3 input.
PERF_TEST_P_(BGR2I420PerfTest, TestPerformance)
{
compare_f cmpF;                 // per-pixel comparator
cv::Size sz;                    // input image size
cv::GCompileArgs compile_args;  // backend selection for the G-API graph
std::tie(cmpF, sz, compile_args) = GetParam();
initMatrixRandN(CV_8UC3, sz, CV_8UC1, false);  // I420 output is single-plane
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2YUV_I420);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::BGR2I420(in);
cv::GComputation c(in, out);
// Warm-up graph engine:
c.apply(in_mat1, out_mat_gapi, std::move(compile_args));
TEST_CYCLE()
{
c.apply(in_mat1, out_mat_gapi);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
// I420 packs 4:2:0 chroma below the luma plane: height grows by 3/2.
EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2));
}
SANITY_CHECK_NOTHING();
}
//------------------------------------------------------------------------------
// Benchmarks cv::gapi::RGB2I420() and validates it against cv::cvtColor
// with COLOR_RGB2YUV_I420 on the same random 8UC3 input.
PERF_TEST_P_(RGB2I420PerfTest, TestPerformance)
{
compare_f cmpF;                 // per-pixel comparator
cv::Size sz;                    // input image size
cv::GCompileArgs compile_args;  // backend selection for the G-API graph
std::tie(cmpF, sz, compile_args) = GetParam();
initMatrixRandN(CV_8UC3, sz, CV_8UC1, false);  // I420 output is single-plane
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_RGB2YUV_I420);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::RGB2I420(in);
cv::GComputation c(in, out);
// Warm-up graph engine:
c.apply(in_mat1, out_mat_gapi, std::move(compile_args));
TEST_CYCLE()
{
c.apply(in_mat1, out_mat_gapi);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
// I420 packs 4:2:0 chroma below the luma plane: height grows by 3/2.
EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2));
}
SANITY_CHECK_NOTHING();
}
//------------------------------------------------------------------------------
// Benchmarks cv::gapi::I4202BGR() and validates it against cv::cvtColor
// with COLOR_YUV2BGR_I420 on the same random single-plane input.
PERF_TEST_P_(I4202BGRPerfTest, TestPerformance)
{
compare_f cmpF;                 // per-pixel comparator
cv::Size sz;                    // packed I420 buffer size (luma + chroma rows)
cv::GCompileArgs compile_args;  // backend selection for the G-API graph
std::tie(cmpF, sz, compile_args) = GetParam();
initMatrixRandN(CV_8UC1, sz, CV_8UC3, false);  // input plane, 3-channel output
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2BGR_I420);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::I4202BGR(in);
cv::GComputation c(in, out);
// Warm-up graph engine:
c.apply(in_mat1, out_mat_gapi, std::move(compile_args));
TEST_CYCLE()
{
c.apply(in_mat1, out_mat_gapi);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
// Only 2/3 of the packed I420 rows are luma, so the image height shrinks.
EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3));
}
SANITY_CHECK_NOTHING();
}
//------------------------------------------------------------------------------
// Benchmarks cv::gapi::I4202RGB() and validates it against cv::cvtColor
// with COLOR_YUV2RGB_I420 on the same random single-plane input.
PERF_TEST_P_(I4202RGBPerfTest, TestPerformance)
{
compare_f cmpF;                 // per-pixel comparator
cv::Size sz;                    // packed I420 buffer size (luma + chroma rows)
cv::GCompileArgs compile_args;  // backend selection for the G-API graph
std::tie(cmpF, sz, compile_args) = GetParam();
initMatrixRandN(CV_8UC1, sz, CV_8UC3, false);  // input plane, 3-channel output
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2RGB_I420);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::I4202RGB(in);
cv::GComputation c(in, out);
// Warm-up graph engine:
c.apply(in_mat1, out_mat_gapi, std::move(compile_args));
TEST_CYCLE()
{
c.apply(in_mat1, out_mat_gapi);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv));
// Only 2/3 of the packed I420 rows are luma, so the image height shrinks.
EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3));
}
SANITY_CHECK_NOTHING();
}
//------------------------------------------------------------------------------
PERF_TEST_P_(RGB2LabPerfTest, TestPerformance)
{
compare_f cmpF = get<0>(GetParam());

View File

@ -44,7 +44,7 @@ PERF_TEST_P_(BuildOptFlowPyramidPerfTest, TestPerformance)
outMaxLevelGAPI = static_cast<int>(outMaxLevelSc[0]);
// Comparison //////////////////////////////////////////////////////////////
compareOutputPyramids(outOCV, outGAPI);
compareOutputPyramids(outGAPI, outOCV);
SANITY_CHECK_NOTHING();
}
@ -74,7 +74,7 @@ PERF_TEST_P_(OptFlowLKPerfTest, TestPerformance)
}
// Comparison //////////////////////////////////////////////////////////////
compareOutputsOptFlow(outOCV, outGAPI);
compareOutputsOptFlow(outGAPI, outOCV);
SANITY_CHECK_NOTHING();
}
@ -109,7 +109,7 @@ PERF_TEST_P_(OptFlowLKForPyrPerfTest, TestPerformance)
}
// Comparison //////////////////////////////////////////////////////////////
compareOutputsOptFlow(outOCV, outGAPI);
compareOutputsOptFlow(outGAPI, outOCV);
SANITY_CHECK_NOTHING();
}
@ -147,7 +147,7 @@ PERF_TEST_P_(BuildPyr_CalcOptFlow_PipelinePerfTest, TestPerformance)
}
// Comparison //////////////////////////////////////////////////////////////
compareOutputsOptFlow(outOCV, outGAPI);
compareOutputsOptFlow(outGAPI, outOCV);
SANITY_CHECK_NOTHING();
}

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#include "../perf_precomp.hpp"
@ -160,6 +160,12 @@ INSTANTIATE_TEST_CASE_P(SumPerfTestCPU, SumPerfTest,
//Values(0.0),
Values(cv::compile_args(CORE_CPU))));
INSTANTIATE_TEST_CASE_P(CountNonZeroPerfTestCPU, CountNonZeroPerfTest,
Combine(Values(AbsToleranceScalar(0.0).to_compare_f()),
Values(szSmall128, szVGA, sz720p, sz1080p),
Values(CV_8UC1, CV_16UC1, CV_16SC1, CV_32FC1),
Values(cv::compile_args(CORE_CPU))));
INSTANTIATE_TEST_CASE_P(AddWeightedPerfTestCPU, AddWeightedPerfTest,
Combine(Values(Tolerance_FloatRel_IntAbs(1e-6, 1).to_compare_f()),
Values(szSmall128, szVGA, sz720p, sz1080p),

View File

@ -233,11 +233,6 @@ INSTANTIATE_TEST_CASE_P(Split3PerfTestFluid, Split3PerfTest,
// Values(cv::Rect(10, 8, 20, 35), cv::Rect(4, 10, 37, 50)),
// Values(cv::compile_args(CORE_FLUID))));
// INSTANTIATE_TEST_CASE_P(CopyPerfTestFluid, CopyPerfTest,
// Combine(Values(szSmall128, szVGA, sz720p, sz1080p),
// Values(CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1),
// Values(cv::compile_args(CORE_FLUID))));
// INSTANTIATE_TEST_CASE_P(ConcatHorPerfTestFluid, ConcatHorPerfTest,
// Combine(Values(szSmall128, szVGA, sz720p, sz1080p),
// Values(CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1),

View File

@ -179,6 +179,11 @@ INSTANTIATE_TEST_CASE_P(EqHistPerfTestCPU, EqHistPerfTest,
Values(szVGA, sz720p, sz1080p),
Values(cv::compile_args(IMGPROC_CPU))));
INSTANTIATE_TEST_CASE_P(BGR2RGBPerfTestCPU, BGR2RGBPerfTest,
Combine(Values(AbsExact().to_compare_f()),
Values(szVGA, sz720p, sz1080p),
Values(cv::compile_args(IMGPROC_CPU))));
INSTANTIATE_TEST_CASE_P(RGB2GrayPerfTestCPU, RGB2GrayPerfTest,
Combine(Values(AbsExact().to_compare_f()),
Values(szVGA, sz720p, sz1080p),
@ -199,6 +204,26 @@ INSTANTIATE_TEST_CASE_P(YUV2RGBPerfTestCPU, YUV2RGBPerfTest,
Values(szVGA, sz720p, sz1080p),
Values(cv::compile_args(IMGPROC_CPU))));
INSTANTIATE_TEST_CASE_P(BGR2I420PerfTestCPU, BGR2I420PerfTest,
Combine(Values(AbsExact().to_compare_f()),
Values(szVGA, sz720p, sz1080p),
Values(cv::compile_args(IMGPROC_CPU))));
INSTANTIATE_TEST_CASE_P(RGB2I420PerfTestCPU, RGB2I420PerfTest,
Combine(Values(AbsExact().to_compare_f()),
Values(szVGA, sz720p, sz1080p),
Values(cv::compile_args(IMGPROC_CPU))));
INSTANTIATE_TEST_CASE_P(I4202BGRPerfTestCPU, I4202BGRPerfTest,
Combine(Values(AbsExact().to_compare_f()),
Values(szVGA, sz720p, sz1080p),
Values(cv::compile_args(IMGPROC_CPU))));
INSTANTIATE_TEST_CASE_P(I4202RGBPerfTestCPU, I4202RGBPerfTest,
Combine(Values(AbsExact().to_compare_f()),
Values(szVGA, sz720p, sz1080p),
Values(cv::compile_args(IMGPROC_CPU))));
INSTANTIATE_TEST_CASE_P(RGB2LabPerfTestCPU, RGB2LabPerfTest,
Combine(Values(AbsExact().to_compare_f()),
Values(szVGA, sz720p, sz1080p),

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#include "../perf_precomp.hpp"
@ -157,6 +157,12 @@ INSTANTIATE_TEST_CASE_P(SumPerfTestGPU, SumPerfTest,
Values( CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1 ),
Values(cv::compile_args(CORE_GPU))));
INSTANTIATE_TEST_CASE_P(CountNonZeroPerfTestGPU, CountNonZeroPerfTest,
Combine(Values(AbsToleranceScalar(0.0).to_compare_f()),
Values(szSmall128, szVGA, sz720p, sz1080p),
Values(CV_8UC1, CV_16UC1, CV_16SC1, CV_32FC1),
Values(cv::compile_args(CORE_GPU))));
INSTANTIATE_TEST_CASE_P(AddWeightedPerfTestGPU, AddWeightedPerfTest,
Combine(Values(Tolerance_FloatRel_IntAbs(1e-6, 1).to_compare_f()),
Values( szSmall128, szVGA, sz720p, sz1080p ),
@ -226,11 +232,6 @@ INSTANTIATE_TEST_CASE_P(CropPerfTestGPU, CropPerfTest,
Values(cv::Rect(10, 8, 20, 35), cv::Rect(4, 10, 37, 50)),
Values(cv::compile_args(CORE_GPU))));
INSTANTIATE_TEST_CASE_P(CopyPerfTestGPU, CopyPerfTest,
Combine(Values( szSmall128, szVGA, sz720p, sz1080p ),
Values( CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1 ),
Values(cv::compile_args(CORE_GPU))));
INSTANTIATE_TEST_CASE_P(ConcatHorPerfTestGPU, ConcatHorPerfTest,
Combine(Values( szSmall128, szVGA, sz720p, sz1080p ),
Values( CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1 ),

View File

@ -0,0 +1,195 @@
#include <algorithm>
#include <chrono>
#include <cmath>
#include <iomanip>
#include <numeric>
#include <vector>

#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/gapi.hpp"
#include "opencv2/gapi/core.hpp"
#include "opencv2/gapi/imgproc.hpp"
#include "opencv2/gapi/infer.hpp"
#include "opencv2/gapi/infer/ie.hpp"
#include "opencv2/gapi/infer/onnx.hpp"
#include "opencv2/gapi/cpu/gcpukernel.hpp"
#include "opencv2/gapi/streaming/cap.hpp"
namespace {
const std::string keys =
"{ h help | | print this help message }"
"{ input | | Path to an input video file }"
"{ fdm | | IE face detection model IR }"
"{ fdw | | IE face detection model weights }"
"{ fdd | | IE face detection device }"
"{ emom | | ONNX emotions recognition model }"
"{ output | | (Optional) Path to an output video file }"
;
} // namespace
namespace custom {
G_API_NET(Faces, <cv::GMat(cv::GMat)>, "face-detector");
G_API_NET(Emotions, <cv::GMat(cv::GMat)>, "emotions-recognition");
// G-API operation: turns a raw face-detector output tensor plus the source
// frame into a list of face rectangles. The OpenCV-backend implementation
// is provided by the OCVPostProc kernel in this file.
G_API_OP(PostProc, <cv::GArray<cv::Rect>(cv::GMat, cv::GMat)>, "custom.fd_postproc") {
// The number of detections is data-dependent, so the output array has no
// statically derivable description.
static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GMatDesc &) {
return cv::empty_array_desc();
}
};
// CPU implementation of PostProc: parses an SSD-style detection tensor.
// Each proposal is OBJECT_SIZE floats; indices used here are
//   [0] batch/image id, [2] confidence, [3..6] normalized L/T/R/B coords
// (index 1 is skipped -- presumably the class label; confirm against the
// model's output spec).
GAPI_OCV_KERNEL(OCVPostProc, PostProc) {
static void run(const cv::Mat &in_ssd_result,
const cv::Mat &in_frame,
std::vector<cv::Rect> &out_faces) {
const int MAX_PROPOSALS = 200;  // upper bound on proposals scanned
const int OBJECT_SIZE   = 7;    // floats per proposal
const cv::Size upscale = in_frame.size();   // scale boxes to frame pixels
const cv::Rect surface({0,0}, upscale);     // used to clip boxes to frame
out_faces.clear();
const float *data = in_ssd_result.ptr<float>();
for (int i = 0; i < MAX_PROPOSALS; i++) {
const float image_id   = data[i * OBJECT_SIZE + 0]; // batch id
const float confidence = data[i * OBJECT_SIZE + 2];
const float rc_left    = data[i * OBJECT_SIZE + 3];
const float rc_top     = data[i * OBJECT_SIZE + 4];
const float rc_right   = data[i * OBJECT_SIZE + 5];
const float rc_bottom  = data[i * OBJECT_SIZE + 6];
if (image_id < 0.f) {  // indicates end of detections
break;
}
if (confidence < 0.5f) {  // drop low-confidence proposals
continue;
}
// Coordinates are normalized [0,1]; convert to pixel units.
cv::Rect rc;
rc.x      = static_cast<int>(rc_left   * upscale.width);
rc.y      = static_cast<int>(rc_top    * upscale.height);
rc.width  = static_cast<int>(rc_right  * upscale.width)  - rc.x;
rc.height = static_cast<int>(rc_bottom * upscale.height) - rc.y;
// Intersect with the frame rectangle so boxes never stick outside.
out_faces.push_back(rc & surface);
}
}
};
//! [Postproc]
} // namespace custom
namespace labels {
// Labels as defined in
// https://github.com/onnx/models/tree/master/vision/body_analysis/emotion_ferplus
//
const std::string emotions[] = {
    "neutral", "happiness", "surprise", "sadness", "anger", "disgust", "fear", "contempt"
};
namespace {
// Turn raw logits in [begin, end) into a probability distribution.
template<typename Iter>
std::vector<float> softmax(Iter begin, Iter end) {
    std::vector<float> prob;
    prob.reserve(end - begin);
    float sum = 0.f;
    for (Iter it = begin; it != end; ++it) {
        const float e = std::exp(*it);
        prob.push_back(e);
        sum += e;
    }
    for (auto &p : prob) {
        p /= sum;
    }
    return prob;
}
// Draw every face box on the frame, label it with the most probable
// emotion, and echo that emotion to stdout.
void DrawResults(cv::Mat &frame,
                 const std::vector<cv::Rect> &faces,
                 const std::vector<cv::Mat> &out_emotions) {
    CV_Assert(faces.size() == out_emotions.size());
    for (std::size_t i = 0u; i < faces.size(); i++) {
        const cv::Rect &rc = faces[i];
        // The FER+ model reports 8 emotion logits per face
        const float *emotions_data = out_emotions[i].ptr<float>();
        const auto sm = softmax(emotions_data, emotions_data + 8);
        const auto emo_id = std::distance(sm.begin(),
                                          std::max_element(sm.begin(), sm.end()));
        const int ATTRIB_OFFSET = 15;
        cv::rectangle(frame, rc, {0, 255, 0}, 4);
        cv::putText(frame, emotions[emo_id],
                    cv::Point(rc.x, rc.y - ATTRIB_OFFSET),
                    cv::FONT_HERSHEY_COMPLEX_SMALL,
                    1,
                    cv::Scalar(0, 0, 255));
        std::cout << emotions[emo_id] << " at " << rc << std::endl;
    }
}
} // anonymous namespace
} // namespace labels
// Entry point: face detection (OpenVINO IE) + per-face emotion recognition
// (ONNX) over a desynchronized streaming pipeline.
int main(int argc, char *argv[])
{
    cv::CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help")) {
        cmd.printMessage();
        return 0;
    }
    const std::string input = cmd.get<std::string>("input");
    const std::string output = cmd.get<std::string>("output");

    // OpenVINO FD parameters here
    auto det_net = cv::gapi::ie::Params<custom::Faces> {
        cmd.get<std::string>("fdm"),   // read cmd args: path to topology IR
        cmd.get<std::string>("fdw"),   // read cmd args: path to weights
        cmd.get<std::string>("fdd"),   // read cmd args: device specifier
    };

    // ONNX Emotions parameters here
    auto emo_net = cv::gapi::onnx::Params<custom::Emotions> {
        cmd.get<std::string>("emom"),  // read cmd args: path to the ONNX model
    }.cfgNormalize({false});           // model accepts 0..255 range in FP32

    auto kernels = cv::gapi::kernels<custom::OCVPostProc>();
    auto networks = cv::gapi::networks(det_net, emo_net);

    // Build the streaming graph. desync() splits the pipeline in two: the
    // raw frame branch (bgr) runs at full video rate, while the inference
    // branch (frame -> detections -> faces -> emotions) may lag behind and
    // deliver its results asynchronously.
    cv::GMat in;
    cv::GMat bgr = cv::gapi::copy(in);
    cv::GMat frame = cv::gapi::streaming::desync(bgr);
    cv::GMat detections = cv::gapi::infer<custom::Faces>(frame);
    cv::GArray<cv::Rect> faces = custom::PostProc::on(detections, frame);
    cv::GArray<cv::GMat> emotions = cv::gapi::infer<custom::Emotions>(faces, frame);
    auto pipeline = cv::GComputation(cv::GIn(in), cv::GOut(bgr, faces, emotions))
        .compileStreaming(cv::compile_args(kernels, networks));

    auto in_src = cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input);
    pipeline.setSource(cv::gin(in_src));
    pipeline.start();

    // With desync() in the graph every output becomes optional: any single
    // pull() may deliver a fresh frame, fresh inference results, or both.
    cv::util::optional<cv::Mat> out_frame;
    cv::util::optional<std::vector<cv::Rect>> out_faces;
    cv::util::optional<std::vector<cv::Mat>> out_emotions;

    // Latest-known results, reused while the inference branch lags behind.
    cv::Mat last_mat;
    std::vector<cv::Rect> last_faces;
    std::vector<cv::Mat> last_emotions;

    cv::VideoWriter writer;
    while (pipeline.pull(cv::gout(out_frame, out_faces, out_emotions))) {
        if (out_faces && out_emotions) {
            // Inference branch delivered: remember the freshest results
            last_faces = *out_faces;
            last_emotions = *out_emotions;
        }
        if (out_frame) {
            // Video branch delivered: annotate with the latest known results
            last_mat = *out_frame;
            labels::DrawResults(last_mat, last_faces, last_emotions);

            if (!output.empty()) {
                if (!writer.isOpened()) {
                    // Lazily open the writer once the frame size is known
                    const auto sz = cv::Size{last_mat.cols, last_mat.rows};
                    writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz);
                    CV_Assert(writer.isOpened());
                }
                writer << last_mat;
            }
        }
        if (!last_mat.empty()) {
            cv::imshow("Out", last_mat);
            cv::waitKey(1);
        }
    }
    return 0;
}

View File

@ -0,0 +1,264 @@
#include <algorithm>
#include <iostream>
#include <sstream>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/render.hpp>
#include <opencv2/gapi/infer/ie.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp>
// Command-line options of this sample, in cv::CommandLineParser format.
const std::string keys =
    "{ h help | | Print this help message }"
    "{ input | | Path to the input video file }"
    "{ facem | face-detection-adas-0001.xml | Path to OpenVINO IE face detection model (.xml) }"
    "{ faced | CPU | Target device for face detection model (e.g. CPU, GPU, VPU, ...) }"
    "{ r roi | -1,-1,-1,-1 | Region of interest (ROI) to use for inference. Identified automatically when not set }";
namespace {
// Derive the .bin weights-file path from an OpenVINO .xml topology path.
// Asserts that model_path ends with ".xml" (case-insensitive).
std::string weights_path(const std::string &model_path) {
    const auto EXT_LEN = 4u;
    const auto sz = model_path.size();
    CV_Assert(sz > EXT_LEN);

    auto ext = model_path.substr(sz - EXT_LEN);
    std::transform(ext.begin(), ext.end(), ext.begin(), [](unsigned char c){
            return static_cast<unsigned char>(std::tolower(c));
        });
    CV_Assert(ext == ".xml");
    return model_path.substr(0u, sz - EXT_LEN) + ".bin";
}

// Parse a ROI string of the form "x,y,w,h" into a cv::Rect.
// Returns an empty optional when the string is malformed, the separators
// are not commas, or the rectangle is degenerate/negative.
cv::util::optional<cv::Rect> parse_roi(const std::string &rc) {
    cv::Rect rv;
    char delim[3] = {0, 0, 0};  // zero-init: read below even if extraction stops early
    std::stringstream is(rc);
    is >> rv.x >> delim[0] >> rv.y >> delim[1] >> rv.width >> delim[2] >> rv.height;
    // BUGFIX: fail() (not bad()) is required here -- a malformed number only
    // sets failbit; badbit is reserved for unrecoverable stream errors, so
    // bad() let partially-parsed garbage through.
    if (is.fail()) {
        return cv::util::optional<cv::Rect>(); // empty value
    }
    const auto is_delim = [](char c) {
        return c == ',';
    };
    if (!std::all_of(std::begin(delim), std::end(delim), is_delim)) {
        return cv::util::optional<cv::Rect>(); // empty value
    }
    if (rv.x < 0 || rv.y < 0 || rv.width <= 0 || rv.height <= 0) {
        return cv::util::optional<cv::Rect>(); // empty value
    }
    return cv::util::make_optional(std::move(rv));
}
} // namespace
namespace custom {
// IE face-detection network: BGR image in, SSD [1x1xNx7] detection blob out.
G_API_NET(FaceDetector, <cv::GMat(cv::GMat)>, "face-detector");

using GDetections = cv::GArray<cv::Rect>;                  // detected face boxes
using GRect       = cv::GOpaque<cv::Rect>;                 // a single ROI rectangle
using GSize       = cv::GOpaque<cv::Size>;                 // image dimensions
using GPrims      = cv::GArray<cv::gapi::wip::draw::Prim>; // render primitives

// Report the pixel dimensions of a GMat.
G_API_OP(GetSize, <GSize(cv::GMat)>, "sample.custom.get-size") {
    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
        return cv::empty_gopaque_desc();
    }
};
// Pick the ROI within the frame where inference should run.
G_API_OP(LocateROI, <GRect(cv::GMat)>, "sample.custom.locate-roi") {
    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
        return cv::empty_gopaque_desc();
    }
};
// Decode an SSD blob into rectangles in full-frame coordinates, given the
// ROI the detector actually saw and the parent image size (for clipping).
G_API_OP(ParseSSD, <GDetections(cv::GMat, GRect, GSize)>, "sample.custom.parse-ssd") {
    static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &, const cv::GOpaqueDesc &) {
        return cv::empty_array_desc();
    }
};
// Convert the detections (and the ROI itself) into drawable primitives.
G_API_OP(BBoxes, <GPrims(GDetections, GRect)>, "sample.custom.b-boxes") {
    static cv::GArrayDesc outMeta(const cv::GArrayDesc &, const cv::GOpaqueDesc &) {
        return cv::empty_array_desc();
    }
};
GAPI_OCV_KERNEL(OCVGetSize, GetSize) {
    static void run(const cv::Mat &in, cv::Size &out) {
        out = {in.cols, in.rows};
    }
};
GAPI_OCV_KERNEL(OCVLocateROI, LocateROI) {
    // This is the place where we can run extra analytics
    // on the input image frame and select the ROI (region
    // of interest) where we want to detect our objects (or
    // run any other inference).
    //
    // Currently it doesn't do anything intelligent,
    // but only crops the input image to square (this is
    // the most convenient aspect ratio for detectors to use)
    static void run(const cv::Mat &in_mat, cv::Rect &out_rect) {
        // Identify the central point & square size (- some padding)
        const auto center = cv::Point{in_mat.cols/2, in_mat.rows/2};
        auto sqside = std::min(in_mat.cols, in_mat.rows);
        // Now build the central square ROI
        out_rect = cv::Rect{ center.x - sqside/2
                           , center.y - sqside/2
                           , sqside
                           , sqside
                           };
    }
};
GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
    // Parse an SSD "detection_output" blob [1 x 1 x N x 7]; each row is
    // [image_id, label, confidence, l, t, r, b] with relative coordinates.
    static void run(const cv::Mat &in_ssd_result,
                    const cv::Rect &in_roi,
                    const cv::Size &in_parent_size,
                    std::vector<cv::Rect> &out_objects) {
        const auto &in_ssd_dims = in_ssd_result.size;
        CV_Assert(in_ssd_dims.dims() == 4u);
        const int MAX_PROPOSALS = in_ssd_dims[2];
        const int OBJECT_SIZE   = in_ssd_dims[3];
        CV_Assert(OBJECT_SIZE == 7); // fixed SSD object size
        const cv::Size up_roi = in_roi.size();
        const cv::Rect surface({0,0}, in_parent_size);
        out_objects.clear();
        const float *data = in_ssd_result.ptr<float>();
        for (int i = 0; i < MAX_PROPOSALS; i++) {
            const float image_id   = data[i * OBJECT_SIZE + 0];
            const float label      = data[i * OBJECT_SIZE + 1];
            const float confidence = data[i * OBJECT_SIZE + 2];
            const float rc_left    = data[i * OBJECT_SIZE + 3];
            const float rc_top     = data[i * OBJECT_SIZE + 4];
            const float rc_right   = data[i * OBJECT_SIZE + 5];
            const float rc_bottom  = data[i * OBJECT_SIZE + 6];
            (void) label; // unused
            if (image_id < 0.f) {
                break; // marks end-of-detections
            }
            if (confidence < 0.5f) {
                continue; // skip objects with low confidence
            }
            // map relative coordinates to the original image scale
            // taking the ROI into account
            cv::Rect rc;
            rc.x      = static_cast<int>(rc_left   * up_roi.width);
            rc.y      = static_cast<int>(rc_top    * up_roi.height);
            rc.width  = static_cast<int>(rc_right  * up_roi.width)  - rc.x;
            rc.height = static_cast<int>(rc_bottom * up_roi.height) - rc.y;
            rc.x += in_roi.x; // shift from ROI-local to frame coordinates
            rc.y += in_roi.y;
            out_objects.emplace_back(rc & surface); // clip to the frame
        }
    }
};
GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
    // This kernel converts the rectangles into G-API's
    // rendering primitives
    static void run(const std::vector<cv::Rect> &in_face_rcs,
                    const cv::Rect &in_roi,
                    std::vector<cv::gapi::wip::draw::Prim> &out_prims) {
        out_prims.clear();
        const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) {
            return cv::gapi::wip::draw::Rect(rc, clr, 2);
        };
        out_prims.emplace_back(cvt(in_roi, CV_RGB(0,255,255))); // cyan
        for (auto &&rc : in_face_rcs) {
            out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0)));   // green
        }
    }
};
} // namespace custom
// Entry point: face detection restricted to a ROI which is either supplied
// by the user (-roi) or located automatically by the LocateROI kernel.
int main(int argc, char *argv[])
{
    cv::CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help")) {
        cmd.printMessage();
        return 0;
    }

    // Prepare parameters first
    const std::string input = cmd.get<std::string>("input");
    const auto opt_roi = parse_roi(cmd.get<std::string>("roi"));
    const auto face_model_path = cmd.get<std::string>("facem");
    auto face_net = cv::gapi::ie::Params<custom::FaceDetector> {
        face_model_path,                // path to topology IR
        weights_path(face_model_path),  // path to weights
        cmd.get<std::string>("faced"),  // device specifier
    };
    auto kernels = cv::gapi::kernels
        < custom::OCVGetSize
        , custom::OCVLocateROI
        , custom::OCVParseSSD
        , custom::OCVBBoxes>();
    auto networks = cv::gapi::networks(face_net);

    // Now build the graph. The graph structure may vary
    // based on the input parameters
    cv::GStreamingCompiled pipeline;
    auto inputs = cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));
    if (opt_roi.has_value()) {
        // Use the value provided by user
        std::cout << "Will run inference for static region "
                  << opt_roi.value()
                  << " only"
                  << std::endl;
        cv::GMat in;
        cv::GOpaque<cv::Rect> in_roi;
        auto blob = cv::gapi::infer<custom::FaceDetector>(in_roi, in);
        auto rcs = custom::ParseSSD::on(blob, in_roi, custom::GetSize::on(in));
        auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, in_roi));
        pipeline = cv::GComputation(cv::GIn(in, in_roi), cv::GOut(out))
            .compileStreaming(cv::compile_args(kernels, networks));

        // Since the ROI to detect is manual, make it part of the input vector
        inputs.push_back(cv::gin(opt_roi.value())[0]);
    } else {
        // Automatically detect ROI to infer. Make it output parameter
        std::cout << "ROI is not set or invalid. Locating it automatically"
                  << std::endl;
        cv::GMat in;
        cv::GOpaque<cv::Rect> roi = custom::LocateROI::on(in);
        auto blob = cv::gapi::infer<custom::FaceDetector>(roi, in);
        auto rcs = custom::ParseSSD::on(blob, roi, custom::GetSize::on(in));
        auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, roi));
        pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out))
            .compileStreaming(cv::compile_args(kernels, networks));
    }

    // The execution part
    pipeline.setSource(std::move(inputs));
    pipeline.start();

    cv::Mat out;
    while (pipeline.pull(cv::gout(out))) {
        cv::imshow("Out", out);
        cv::waitKey(1);
    }
    return 0;
}

View File

@ -0,0 +1,213 @@
#include <algorithm>
#include <iostream>
#include <sstream>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/render.hpp>
#include <opencv2/gapi/infer/onnx.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp>
namespace custom {
// ONNX SSD object detector: BGR image in, 1x1x200x7 detection blob out
// (remapped from the model's native outputs by remap_ssd_ports below).
G_API_NET(ObjDetector, <cv::GMat(cv::GMat)>, "object-detector");

using GDetections = cv::GArray<cv::Rect>;                  // detected boxes
using GSize       = cv::GOpaque<cv::Size>;                 // image dimensions
using GPrims      = cv::GArray<cv::gapi::wip::draw::Prim>; // render primitives

// Report the pixel dimensions of a GMat.
G_API_OP(GetSize, <GSize(cv::GMat)>, "sample.custom.get-size") {
    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
        return cv::empty_gopaque_desc();
    }
};
// Decode an SSD detection blob into absolute-pixel rectangles.
G_API_OP(ParseSSD, <GDetections(cv::GMat, GSize)>, "sample.custom.parse-ssd") {
    static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &) {
        return cv::empty_array_desc();
    }
};
// Convert rectangles into drawable primitives.
G_API_OP(BBoxes, <GPrims(GDetections)>, "sample.custom.b-boxes") {
    static cv::GArrayDesc outMeta(const cv::GArrayDesc &) {
        return cv::empty_array_desc();
    }
};
GAPI_OCV_KERNEL(OCVGetSize, GetSize) {
    static void run(const cv::Mat &in, cv::Size &out) {
        out = {in.cols, in.rows};
    }
};
GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
    // Parse an SSD "detection_output" blob [1 x 1 x N x 7]; each row is
    // [image_id, label, confidence, l, t, r, b] with relative coordinates.
    static void run(const cv::Mat &in_ssd_result,
                    const cv::Size &in_parent_size,
                    std::vector<cv::Rect> &out_objects) {
        const auto &in_ssd_dims = in_ssd_result.size;
        CV_Assert(in_ssd_dims.dims() == 4u);
        const int MAX_PROPOSALS = in_ssd_dims[2];
        const int OBJECT_SIZE   = in_ssd_dims[3];
        CV_Assert(OBJECT_SIZE == 7); // fixed SSD object size
        const cv::Rect surface({0,0}, in_parent_size);
        out_objects.clear();
        const float *data = in_ssd_result.ptr<float>();
        for (int i = 0; i < MAX_PROPOSALS; i++) {
            const float image_id   = data[i * OBJECT_SIZE + 0];
            const float label      = data[i * OBJECT_SIZE + 1];
            const float confidence = data[i * OBJECT_SIZE + 2];
            const float rc_left    = data[i * OBJECT_SIZE + 3];
            const float rc_top     = data[i * OBJECT_SIZE + 4];
            const float rc_right   = data[i * OBJECT_SIZE + 5];
            const float rc_bottom  = data[i * OBJECT_SIZE + 6];
            (void) label; // unused
            if (image_id < 0.f) {
                break; // marks end-of-detections
            }
            if (confidence < 0.5f) {
                continue; // skip objects with low confidence
            }
            // map relative coordinates to the original image scale
            cv::Rect rc;
            rc.x      = static_cast<int>(rc_left   * in_parent_size.width);
            rc.y      = static_cast<int>(rc_top    * in_parent_size.height);
            rc.width  = static_cast<int>(rc_right  * in_parent_size.width)  - rc.x;
            rc.height = static_cast<int>(rc_bottom * in_parent_size.height) - rc.y;
            out_objects.emplace_back(rc & surface); // clip to the image bounds
        }
    }
};
GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
    // This kernel converts the rectangles into G-API's
    // rendering primitives
    static void run(const std::vector<cv::Rect> &in_obj_rcs,
                    std::vector<cv::gapi::wip::draw::Prim> &out_prims) {
        out_prims.clear();
        const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) {
            return cv::gapi::wip::draw::Rect(rc, clr, 2);
        };
        for (auto &&rc : in_obj_rcs) {
            out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0))); // green
        }
        // Also trace the detections to stdout for debugging
        std::cout << "Detections:";
        for (auto &&rc : in_obj_rcs) std::cout << ' ' << rc;
        std::cout << std::endl;
    }
};
} // namespace custom
namespace {
// Repack the four raw ONNX SSD output tensors into the single OpenVINO-style
// 1x1xNx7 "detection_output" blob the rest of the pipeline parses.
// Each output row is [image_id, label, confidence, left, top, right, bottom].
void remap_ssd_ports(const std::unordered_map<std::string, cv::Mat> &onnx,
                     std::unordered_map<std::string, cv::Mat> &gapi) {
    // Assemble ONNX-processed outputs back to a single 1x1x200x7 blob
    // to preserve compatibility with OpenVINO-based SSD pipeline
    const cv::Mat &num_detections = onnx.at("num_detections:0");
    const cv::Mat &detection_boxes = onnx.at("detection_boxes:0");
    const cv::Mat &detection_scores = onnx.at("detection_scores:0");
    const cv::Mat &detection_classes = onnx.at("detection_classes:0");

    GAPI_Assert(num_detections.depth() == CV_32F);
    GAPI_Assert(detection_boxes.depth() == CV_32F);
    GAPI_Assert(detection_scores.depth() == CV_32F);
    GAPI_Assert(detection_classes.depth() == CV_32F);

    cv::Mat &ssd_output = gapi.at("detection_output");

    const int capacity = ssd_output.size[2];
    int num_objects = static_cast<int>(num_detections.ptr<float>()[0]);
    // BUGFIX: clamp so a model reporting more detections than the blob
    // holds cannot write past the end of ssd_output.
    if (num_objects > capacity) num_objects = capacity;

    const float *in_boxes = detection_boxes.ptr<float>();
    const float *in_scores = detection_scores.ptr<float>();
    const float *in_classes = detection_classes.ptr<float>();
    float *ptr = ssd_output.ptr<float>();

    for (int i = 0; i < num_objects; i++) {
        ptr[0] = 0.f;               // "image_id"
        ptr[1] = in_classes[i];     // "label"
        ptr[2] = in_scores[i];      // "confidence"
        // ONNX boxes come as [y1, x1, y2, x2]; SSD expects [l, t, r, b]
        ptr[3] = in_boxes[4*i + 1]; // left
        ptr[4] = in_boxes[4*i + 0]; // top
        ptr[5] = in_boxes[4*i + 3]; // right
        ptr[6] = in_boxes[4*i + 2]; // bottom
        ptr += 7;
        // BUGFIX: in_boxes was also advanced by 4 here while the loop
        // already indexes it with 4*i -- that made every object i > 0
        // read the wrong box (an effective stride of 8 floats).
    }
    if (num_objects < capacity) {
        // put a -1 mark at the end of output blob if there is space left
        // (BUGFIX: the old "< capacity-1" test skipped the mark when the
        // last-but-one slot was used even though space remained)
        ptr[0] = -1.f;
    }
}
} // anonymous namespace
// Command-line options of this sample, in cv::CommandLineParser format.
const std::string keys =
    "{ h help | | Print this help message }"
    "{ input | | Path to the input video file }"
    "{ output | | (Optional) path to output video file }"
    "{ detm | | Path to an ONNX SSD object detection model (.onnx) }"
    ;
// Entry point: ONNX SSD object detection over a streaming video source,
// with boxes rendered on the frame and an optional video-file output.
int main(int argc, char *argv[])
{
    cv::CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help")) {
        cmd.printMessage();
        return 0;
    }

    // Prepare parameters first
    const std::string input = cmd.get<std::string>("input");
    const std::string output = cmd.get<std::string>("output");
    const auto obj_model_path = cmd.get<std::string>("detm");

    // The model's four raw outputs are remapped into a single
    // OpenVINO-style 1x1x200x7 blob by remap_ssd_ports (defined above)
    auto obj_net = cv::gapi::onnx::Params<custom::ObjDetector>{obj_model_path}
        .cfgOutputLayers({"detection_output"})
        .cfgPostProc({cv::GMatDesc{CV_32F, {1,1,200,7}}}, remap_ssd_ports);
    auto kernels = cv::gapi::kernels< custom::OCVGetSize
                                    , custom::OCVParseSSD
                                    , custom::OCVBBoxes>();
    auto networks = cv::gapi::networks(obj_net);

    // Now build the graph
    cv::GMat in;
    auto blob = cv::gapi::infer<custom::ObjDetector>(in);
    auto rcs = custom::ParseSSD::on(blob, custom::GetSize::on(in));
    auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs));
    cv::GStreamingCompiled pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out))
        .compileStreaming(cv::compile_args(kernels, networks));
    auto inputs = cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));

    // The execution part
    pipeline.setSource(std::move(inputs));
    pipeline.start();

    cv::VideoWriter writer;
    cv::Mat outMat;
    while (pipeline.pull(cv::gout(outMat))) {
        cv::imshow("Out", outMat);
        cv::waitKey(1);
        if (!output.empty()) {
            if (!writer.isOpened()) {
                // Lazily open the writer once the frame size is known
                const auto sz = cv::Size{outMat.cols, outMat.rows};
                writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz);
                CV_Assert(writer.isOpened());
            }
            writer << outMat;
        }
    }
    return 0;
}

View File

@ -0,0 +1,19 @@
#include <opencv2/gapi.hpp> // G-API framework header
#include <opencv2/gapi/imgproc.hpp> // cv::gapi::blur()
#include <opencv2/highgui.hpp> // cv::imread/imwrite
int main(int argc, char *argv[]) {
if (argc < 3) return 1;
cv::GMat in; // Express the graph:
cv::GMat out = cv::gapi::blur(in, cv::Size(3,3)); // `out` is a result of `blur` of `in`
cv::Mat in_mat = cv::imread(argv[1]); // Get the real data
cv::Mat out_mat; // Output buffer (may be empty)
cv::GComputation(cv::GIn(in), cv::GOut(out)) // Declare a graph from `in` to `out`
.apply(cv::gin(in_mat), cv::gout(out_mat)); // ...and run it immediately
cv::imwrite(argv[2], out_mat); // Save the result
return 0;
}

View File

@ -0,0 +1,698 @@
#include <algorithm>
#include <cctype>
#include <cmath>
#include <iostream>
#include <limits>
#include <numeric>
#include <stdexcept>
#include <string>
#include <vector>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/infer/ie.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/utility.hpp>
// One-line description printed by the sample.
const std::string about =
    "This is an OpenCV-based version of OMZ Text Detection example";
// Command-line options of this sample, in cv::CommandLineParser format.
const std::string keys =
    "{ h help | | Print this help message }"
    "{ input | | Path to the input video file }"
    "{ tdm | text-detection-0004.xml | Path to OpenVINO text detection model (.xml), versions 0003 and 0004 work }"
    "{ tdd | CPU | Target device for the text detector (e.g. CPU, GPU, VPU, ...) }"
    "{ trm | text-recognition-0012.xml | Path to OpenVINO text recognition model (.xml) }"
    "{ trd | CPU | Target device for the text recognition (e.g. CPU, GPU, VPU, ...) }"
    "{ bw | 0 | CTC beam search decoder bandwidth, if 0, a CTC greedy decoder is used}"
    "{ sset | 0123456789abcdefghijklmnopqrstuvwxyz | Symbol set to use with text recognition decoder. Shouldn't contain symbol #. }"
    "{ thr | 0.2 | Text recognition confidence threshold}"
    ;
namespace {
// Derive the .bin weights-file path from an OpenVINO .xml topology path.
std::string weights_path(const std::string &model_path) {
    const auto EXT_LEN = 4u;
    const auto sz = model_path.size();
    CV_Assert(sz > EXT_LEN);
    const auto ext = model_path.substr(sz - EXT_LEN);
    CV_Assert(cv::toLowerCase(ext) == ".xml");
    return model_path.substr(0u, sz - EXT_LEN) + ".bin";
}
//////////////////////////////////////////////////////////////////////
// Taken from OMZ samples as-is
// Softmax over [begin, end); reports only the argmax index and its
// probability (the full distribution is never materialized).
template<typename Iter>
void softmax_and_choose(Iter begin, Iter end, int *argmax, float *prob) {
    auto max_element = std::max_element(begin, end);
    *argmax = static_cast<int>(std::distance(begin, max_element));
    float max_val = *max_element;
    double sum = 0;
    // Max-shifted exponentiation for numerical stability
    for (auto i = begin; i != end; i++) {
        sum += std::exp((*i) - max_val);
    }
    if (std::fabs(sum) < std::numeric_limits<double>::epsilon()) {
        throw std::logic_error("sum can't be equal to zero");
    }
    // exp(max - max) == 1, so P(argmax) == 1 / sum
    *prob = 1.0f / static_cast<float>(sum);
}
// Softmax over [begin, end) returning the full probability vector.
// NOTE(review): unlike softmax_and_choose above, this variant is not
// max-shifted, so very large logits could overflow std::exp -- confirm
// the recognizer's output range makes this safe.
template<typename Iter>
std::vector<float> softmax(Iter begin, Iter end) {
    std::vector<float> prob(end - begin, 0.f);
    std::transform(begin, end, prob.begin(), [](float x) { return std::exp(x); });
    float sum = std::accumulate(prob.begin(), prob.end(), 0.0f);
    for (int i = 0; i < static_cast<int>(prob.size()); i++)
        prob[i] /= sum;
    return prob;
}
// A single hypothesis tracked by the CTC beam-search decoder.
struct BeamElement {
    std::vector<int> sentence;   //!< The sequence of chars that will be a result of the beam element

    float prob_blank;            //!< The probability that the last char in CTC sequence
                                 //!< for the beam element is the special blank char

    float prob_not_blank;        //!< The probability that the last char in CTC sequence
                                 //!< for the beam element is NOT the special blank char

    float prob() const {         //!< The probability of the beam element.
        return prob_blank + prob_not_blank;
    }
};
// Greedy CTC decoding: take the most probable symbol at every time step,
// collapse consecutive repeats, and drop the pad symbol. `sz` is the total
// number of floats in `data` (time steps x alphabet size); `*conf`
// accumulates the product of the per-step winning probabilities.
std::string CTCGreedyDecoder(const float *data,
                             const std::size_t sz,
                             const std::string &alphabet,
                             const char pad_symbol,
                             double *conf) {
    std::string res = "";
    bool prev_pad = false;
    *conf = 1;

    const auto num_classes = alphabet.length();
    for (auto it = data; it != (data+sz); it += num_classes) {
        int argmax = 0;
        float prob = 0.f;

        softmax_and_choose(it, it + num_classes, &argmax, &prob);

        (*conf) *= prob;

        auto symbol = alphabet[argmax];
        if (symbol != pad_symbol) {
            // Emit unless it repeats the previous symbol; a repeat is only
            // legal when a pad was seen in between
            if (res.empty() || prev_pad || (!res.empty() && symbol != res.back())) {
                prev_pad = false;
                res += symbol;
            }
        } else {
            prev_pad = true;
        }
    }
    return res;
}
// CTC beam-search decoding: keep up to `bandwidth` best partial sentences
// per time step, tracking blank- and non-blank-ending probabilities
// separately. The last symbol of `alphabet` plays the role of CTC blank.
std::string CTCBeamSearchDecoder(const float *data,
                                 const std::size_t sz,
                                 const std::string &alphabet,
                                 double *conf,
                                 int bandwidth) {
    const auto num_classes = alphabet.length();

    std::vector<BeamElement> curr;
    std::vector<BeamElement> last;

    // Start from a single empty hypothesis ending in a blank
    last.push_back(BeamElement{std::vector<int>(), 1.f, 0.f});

    for (auto it = data; it != (data+sz); it += num_classes) {
        curr.clear();

        std::vector<float> prob = softmax(it, it + num_classes);

        for(const auto& candidate: last) {
            float prob_not_blank = 0.f;
            const std::vector<int>& candidate_sentence = candidate.sentence;
            if (!candidate_sentence.empty()) {
                int n = candidate_sentence.back();
                prob_not_blank = candidate.prob_not_blank * prob[n];
            }
            float prob_blank = candidate.prob() * prob[num_classes - 1];

            // Merge into an existing hypothesis with the same sentence,
            // or add a new one
            auto check_res = std::find_if(curr.begin(),
                                          curr.end(),
                                          [&candidate_sentence](const BeamElement& n) {
                                              return n.sentence == candidate_sentence;
                                          });
            if (check_res == std::end(curr)) {
                curr.push_back(BeamElement{candidate.sentence, prob_blank, prob_not_blank});
            } else {
                check_res->prob_not_blank  += prob_not_blank;
                if (check_res->prob_blank != 0.f) {
                    throw std::logic_error("Probability that the last char in CTC-sequence "
                                           "is the special blank char must be zero here");
                }
                check_res->prob_blank = prob_blank;
            }

            // Try extending the sentence with every non-blank symbol
            for (int i = 0; i < static_cast<int>(num_classes) - 1; i++) {
                auto extend = candidate_sentence;
                extend.push_back(i);

                // Extending with a repeated symbol is only valid when the
                // candidate currently ends in a blank
                if (candidate_sentence.size() > 0 && candidate.sentence.back() == i) {
                    prob_not_blank = prob[i] * candidate.prob_blank;
                } else {
                    prob_not_blank = prob[i] * candidate.prob();
                }

                auto check_res2 = std::find_if(curr.begin(),
                                               curr.end(),
                                               [&extend](const BeamElement &n) {
                                                   return n.sentence == extend;
                                               });
                if (check_res2 == std::end(curr)) {
                    curr.push_back(BeamElement{extend, 0.f, prob_not_blank});
                } else {
                    check_res2->prob_not_blank += prob_not_blank;
                }
            }
        }

        // Prune: keep only the `bandwidth` most probable hypotheses
        sort(curr.begin(), curr.end(), [](const BeamElement &a, const BeamElement &b) -> bool {
            return a.prob() > b.prob();
        });

        last.clear();
        int num_to_copy = std::min(bandwidth, static_cast<int>(curr.size()));
        for (int b = 0; b < num_to_copy; b++) {
            last.push_back(curr[b]);
        }
    }

    *conf = last[0].prob();
    std::string res="";
    for (const auto& idx: last[0].sentence) {
        res += alphabet[idx];
    }

    return res;
}
//////////////////////////////////////////////////////////////////////
} // anonymous namespace
namespace custom {
namespace {
//////////////////////////////////////////////////////////////////////
// Define networks for this sample
using GMat2 = std::tuple<cv::GMat, cv::GMat>;
// Text detector (text-detection-0003/0004): image in,
// link map & segmentation map out.
G_API_NET(TextDetection,
          <GMat2(cv::GMat)>,
          "sample.custom.text_detect");
// Text recognizer (text-recognition-0012): cropped text image in,
// CTC logit sequence out.
G_API_NET(TextRecognition,
          <cv::GMat(cv::GMat)>,
          "sample.custom.text_recogn");

// Define custom operations
using GSize = cv::GOpaque<cv::Size>;
using GRRects = cv::GArray<cv::RotatedRect>;
// Convert the detector's raw outputs into rotated rectangles on the
// source image, filtered by the two confidence thresholds.
G_API_OP(PostProcess,
         <GRRects(cv::GMat,cv::GMat,GSize,float,float)>,
         "sample.custom.text.post_proc") {
    static cv::GArrayDesc outMeta(const cv::GMatDesc &,
                                  const cv::GMatDesc &,
                                  const cv::GOpaqueDesc &,
                                  float,
                                  float) {
        return cv::empty_array_desc();
    }
};
using GMats = cv::GArray<cv::GMat>;
// Crop & rectify every detected text region into a fixed-size blob
// suitable for the recognition network.
G_API_OP(CropLabels,
         <GMats(cv::GMat,GRRects,GSize)>,
         "sample.custom.text.crop") {
    static cv::GArrayDesc outMeta(const cv::GMatDesc &,
                                  const cv::GArrayDesc &,
                                  const cv::GOpaqueDesc &) {
        return cv::empty_array_desc();
    }
};
//////////////////////////////////////////////////////////////////////
// Implement custom operations
GAPI_OCV_KERNEL(OCVPostProcess, PostProcess) {
    // Convert the text detector's "link" and "segmentation" maps into
    // rotated rectangles around the detected text instances.
    static void run(const cv::Mat &link,
                    const cv::Mat &segm,
                    const cv::Size &img_size,
                    const float link_threshold,
                    const float segm_threshold,
                    std::vector<cv::RotatedRect> &out) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        const int kMinArea   = 300; // drop components smaller than this (px^2)
        const int kMinHeight = 10;  // drop components thinner than this (px)

        // Link map: NCHW -> NHWC, pairwise softmax, keep the "positive" channel
        const float *link_data_pointer = link.ptr<float>();
        std::vector<float> link_data(link_data_pointer, link_data_pointer + link.total());
        link_data = transpose4d(link_data, dimsToShape(link.size), {0, 2, 3, 1});
        softmax(link_data);
        link_data = sliceAndGetSecondChannel(link_data);
        std::vector<int> new_link_data_shape = {
            link.size[0],
            link.size[2],
            link.size[3],
            link.size[1]/2,
        };

        // Same treatment for the segmentation (classification) map
        const float *cls_data_pointer = segm.ptr<float>();
        std::vector<float> cls_data(cls_data_pointer, cls_data_pointer + segm.total());
        cls_data = transpose4d(cls_data, dimsToShape(segm.size), {0, 2, 3, 1});
        softmax(cls_data);
        cls_data = sliceAndGetSecondChannel(cls_data);
        std::vector<int> new_cls_data_shape = {
            segm.size[0],
            segm.size[2],
            segm.size[3],
            segm.size[1]/2,
        };

        // Group linked text pixels into components and fit rotated boxes
        out = maskToBoxes(decodeImageByJoin(cls_data, new_cls_data_shape,
                                            link_data, new_link_data_shape,
                                            segm_threshold, link_threshold),
                          static_cast<float>(kMinArea),
                          static_cast<float>(kMinHeight),
                          img_size);
    }

    // Copy a cv::MatSize into a plain vector of dimensions.
    static std::vector<std::size_t> dimsToShape(const cv::MatSize &sz) {
        const int n_dims = sz.dims();
        std::vector<std::size_t> result;
        result.reserve(n_dims);

        // cv::MatSize is not iterable...
        for (int i = 0; i < n_dims; i++) {
            result.emplace_back(static_cast<std::size_t>(sz[i]));
        }
        return result;
    }

    // In-place pairwise softmax over consecutive value pairs
    // (max-shifted for numerical stability).
    static void softmax(std::vector<float> &rdata) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        const size_t last_dim = 2;
        for (size_t i = 0 ; i < rdata.size(); i+=last_dim) {
            float m = std::max(rdata[i], rdata[i+1]);
            rdata[i] = std::exp(rdata[i] - m);
            rdata[i + 1] = std::exp(rdata[i + 1] - m);
            float s = rdata[i] + rdata[i + 1];
            rdata[i] /= s;
            rdata[i + 1] /= s;
        }
    }

    // Permute a flat 4D tensor's data according to `axes` (e.g. NCHW->NHWC).
    static std::vector<float> transpose4d(const std::vector<float> &data,
                                          const std::vector<size_t> &shape,
                                          const std::vector<size_t> &axes) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        if (shape.size() != axes.size())
            throw std::runtime_error("Shape and axes must have the same dimension.");

        for (size_t a : axes) {
            if (a >= shape.size())
                throw std::runtime_error("Axis must be less than dimension of shape.");
        }
        size_t total_size = shape[0]*shape[1]*shape[2]*shape[3];

        // Strides of the output layout, in the permuted axis order
        std::vector<size_t> steps {
            shape[axes[1]]*shape[axes[2]]*shape[axes[3]],
            shape[axes[2]]*shape[axes[3]],
            shape[axes[3]],
            1
        };

        size_t source_data_idx = 0;
        std::vector<float> new_data(total_size, 0);
        std::vector<size_t> ids(shape.size());
        // Walk the source in order, scattering into the permuted position
        for (ids[0] = 0; ids[0] < shape[0]; ids[0]++) {
            for (ids[1] = 0; ids[1] < shape[1]; ids[1]++) {
                for (ids[2] = 0; ids[2] < shape[2]; ids[2]++) {
                    for (ids[3]= 0; ids[3] < shape[3]; ids[3]++) {
                        size_t new_data_idx = ids[axes[0]]*steps[0] + ids[axes[1]]*steps[1] +
                                              ids[axes[2]]*steps[2] + ids[axes[3]]*steps[3];
                        new_data[new_data_idx] = data[source_data_idx++];
                    }
                }
            }
        }
        return new_data;
    }

    // Keep only every second value (the "positive" channel of each pair).
    static std::vector<float> sliceAndGetSecondChannel(const std::vector<float> &data) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        std::vector<float> new_data(data.size() / 2, 0);
        for (size_t i = 0; i < data.size() / 2; i++) {
            new_data[i] = data[2 * i + 1];
        }
        return new_data;
    }

    // Union-find: merge the components containing pixels p1 and p2.
    static void join(const int p1,
                     const int p2,
                     std::unordered_map<int, int> &group_mask) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        const int root1 = findRoot(p1, group_mask);
        const int root2 = findRoot(p2, group_mask);
        if (root1 != root2) {
            group_mask[root1] = root2;
        }
    }

    // Threshold the pixel/link scores and connect neighbouring text pixels
    // (8-connectivity) into components via union-find.
    static cv::Mat decodeImageByJoin(const std::vector<float> &cls_data,
                                     const std::vector<int>   &cls_data_shape,
                                     const std::vector<float> &link_data,
                                     const std::vector<int>   &link_data_shape,
                                     float cls_conf_threshold,
                                     float link_conf_threshold) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        const int h = cls_data_shape[1];
        const int w = cls_data_shape[2];

        // Mark pixels passing the classification threshold; each starts as
        // its own union-find component (-1 == root)
        std::vector<uchar> pixel_mask(h * w, 0);
        std::unordered_map<int, int> group_mask;
        std::vector<cv::Point> points;
        for (int i = 0; i < static_cast<int>(pixel_mask.size()); i++) {
            pixel_mask[i] = cls_data[i] >= cls_conf_threshold;
            if (pixel_mask[i]) {
                points.emplace_back(i % w, i / w);
                group_mask[i] = -1;
            }
        }

        // Threshold the link scores (one per pixel per neighbour direction)
        std::vector<uchar> link_mask(link_data.size(), 0);
        for (size_t i = 0; i < link_mask.size(); i++) {
            link_mask[i] = link_data[i] >= link_conf_threshold;
        }

        size_t neighbours = size_t(link_data_shape[3]);
        // Join each text pixel with every linked text neighbour
        for (const auto &point : points) {
            size_t neighbour = 0;
            for (int ny = point.y - 1; ny <= point.y + 1; ny++) {
                for (int nx = point.x - 1; nx <= point.x + 1; nx++) {
                    if (nx == point.x && ny == point.y)
                        continue;

                    if (nx >= 0 && nx < w && ny >= 0 && ny < h) {
                        uchar pixel_value = pixel_mask[size_t(ny) * size_t(w) + size_t(nx)];
                        uchar link_value  = link_mask[(size_t(point.y) * size_t(w) + size_t(point.x))
                                                      *neighbours + neighbour];
                        if (pixel_value && link_value) {
                            join(point.x + point.y * w, nx + ny * w, group_mask);
                        }
                    }
                    neighbour++;
                }
            }
        }

        return get_all(points, w, h, group_mask);
    }

    // Render the union-find components as an integer label mask
    // (component labels start at 1; background stays 0).
    static cv::Mat get_all(const std::vector<cv::Point> &points,
                           const int w,
                           const int h,
                           std::unordered_map<int, int> &group_mask) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        std::unordered_map<int, int> root_map;
        cv::Mat mask(h, w, CV_32S, cv::Scalar(0));
        for (const auto &point : points) {
            int point_root = findRoot(point.x + point.y * w, group_mask);
            if (root_map.find(point_root) == root_map.end()) {
                root_map.emplace(point_root, static_cast<int>(root_map.size() + 1));
            }
            mask.at<int>(point.x + point.y * w) = root_map[point_root];
        }
        return mask;
    }

    // Union-find root lookup with path compression for the queried point.
    static int findRoot(const int point,
                        std::unordered_map<int, int> &group_mask) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        int root = point;
        bool update_parent = false;
        while (group_mask.at(root) != -1) {
            root = group_mask.at(root);
            update_parent = true;
        }
        if (update_parent) {
            group_mask[point] = root;
        }
        return root;
    }

    // Fit a rotated rectangle around every labelled component in the mask,
    // filtering out too-small / too-thin candidates.
    static std::vector<cv::RotatedRect> maskToBoxes(const cv::Mat &mask,
                                                    const float min_area,
                                                    const float min_height,
                                                    const cv::Size &image_size) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        std::vector<cv::RotatedRect> bboxes;
        double min_val = 0.;
        double max_val = 0.;
        cv::minMaxLoc(mask, &min_val, &max_val);
        int max_bbox_idx = static_cast<int>(max_val);
        cv::Mat resized_mask;
        // Scale the label mask up to the source image resolution
        cv::resize(mask, resized_mask, image_size, 0, 0, cv::INTER_NEAREST);

        for (int i = 1; i <= max_bbox_idx; i++) {
            cv::Mat bbox_mask = resized_mask == i;
            std::vector<std::vector<cv::Point>> contours;

            cv::findContours(bbox_mask, contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
            if (contours.empty())
                continue;
            cv::RotatedRect r = cv::minAreaRect(contours[0]);
            if (std::min(r.size.width, r.size.height) < min_height)
                continue;
            if (r.size.area() < min_area)
                continue;
            bboxes.emplace_back(r);
        }
        return bboxes;
    }
}; // GAPI_OCV_KERNEL(PostProcess)
// OpenCV-backend implementation of the CropLabels kernel: cuts every
// detected rotated rectangle out of the frame and converts it into a
// 1x1xHxW CV_32F blob for the text recognition network.
GAPI_OCV_KERNEL(OCVCropLabels, CropLabels) {
    // image      - full input frame (cvtColor below assumes BGR input)
    // detections - rotated rectangles to crop, in image coordinates
    // outSize    - fixed size every crop is warped to
    // out        - one CV_32F blob per detection, shaped {1,1,H,W}
    static void run(const cv::Mat &image,
                    const std::vector<cv::RotatedRect> &detections,
                    const cv::Size &outSize,
                    std::vector<cv::Mat> &out) {
        out.clear();
        out.reserve(detections.size());
        // Scratch buffers reused across detections
        cv::Mat crop(outSize, CV_8UC3, cv::Scalar(0));
        cv::Mat gray(outSize, CV_8UC1, cv::Scalar(0));
        std::vector<int> blob_shape = {1,1,outSize.height,outSize.width};
        for (auto &&rr : detections) {
            std::vector<cv::Point2f> points(4);
            rr.points(points.data());
            // Start from the "top-left" corner so the warped text keeps
            // a consistent orientation
            const auto top_left_point_idx = topLeftPointIdx(points);
            cv::Point2f point0 = points[static_cast<size_t>(top_left_point_idx)];
            cv::Point2f point1 = points[(top_left_point_idx + 1) % 4];
            cv::Point2f point2 = points[(top_left_point_idx + 2) % 4];
            // Affine transform mapping three rectangle corners onto the
            // corners of the fixed-size output patch
            std::vector<cv::Point2f> from{point0, point1, point2};
            std::vector<cv::Point2f> to{
                cv::Point2f(0.0f, 0.0f),
                cv::Point2f(static_cast<float>(outSize.width-1), 0.0f),
                cv::Point2f(static_cast<float>(outSize.width-1),
                            static_cast<float>(outSize.height-1))
            };
            cv::Mat M = cv::getAffineTransform(from, to);
            cv::warpAffine(image, crop, M, outSize);
            cv::cvtColor(crop, gray, cv::COLOR_BGR2GRAY);
            cv::Mat blob;
            gray.convertTo(blob, CV_32F);
            out.push_back(blob.reshape(1, blob_shape)); // pass as 1,1,H,W instead of H,W
        }
    }
    // Returns the index of the corner to treat as "top-left" (or -1 for an
    // empty input). Tracks the two leftmost corners and, at the end, prefers
    // the higher (smaller y) of the two — this disambiguates near-vertical
    // rectangles where two corners share almost the same x.
    static int topLeftPointIdx(const std::vector<cv::Point2f> &points) {
        // NOTE: Taken from the OMZ text detection sample almost as-is
        cv::Point2f most_left(std::numeric_limits<float>::max(),
                              std::numeric_limits<float>::max());
        cv::Point2f almost_most_left(std::numeric_limits<float>::max(),
                                     std::numeric_limits<float>::max());
        int most_left_idx = -1;
        int almost_most_left_idx = -1;
        for (size_t i = 0; i < points.size() ; i++) {
            if (most_left.x > points[i].x) {
                if (most_left.x < std::numeric_limits<float>::max()) {
                    // Previous leader becomes the runner-up
                    almost_most_left = most_left;
                    almost_most_left_idx = most_left_idx;
                }
                most_left = points[i];
                most_left_idx = static_cast<int>(i);
            }
            if (almost_most_left.x > points[i].x && points[i] != most_left) {
                almost_most_left = points[i];
                almost_most_left_idx = static_cast<int>(i);
            }
        }
        // Tie-break: pick the upper of the two leftmost corners
        if (almost_most_left.y < most_left.y) {
            most_left = almost_most_left;
            most_left_idx = almost_most_left_idx;
        }
        return most_left_idx;
    }
}; // GAPI_OCV_KERNEL(CropLabels)
} // anonymous namespace
} // namespace custom
namespace vis {
namespace {
void drawRotatedRect(cv::Mat &m, const cv::RotatedRect &rc) {
    // Outline the rotated rectangle with a 2px green closed polyline.
    std::vector<cv::Point2f> corners(4);
    rc.points(corners.data());
    const cv::Scalar color(50, 205, 50);
    for (std::size_t i = 0; i < corners.size(); i++) {
        const auto &from = corners[i];
        const auto &to   = corners[(i + 1) % corners.size()];
        cv::line(m, from, to, color, 2);
    }
}
void drawText(cv::Mat &m, const cv::RotatedRect &rc, const std::string &str) {
    // Render `str` on a filled green plate anchored at the rectangle's
    // top-left corner, clamped so the label stays inside the image.
    const int fface = cv::FONT_HERSHEY_SIMPLEX;
    const double scale = 0.7;
    const int thick = 1;
    int base = 0;
    const auto text_size = cv::getTextSize(str, fface, scale, thick, &base);
    std::vector<cv::Point2f> corners(4);
    rc.points(corners.data());
    const auto tl_point_idx = custom::OCVCropLabels::topLeftPointIdx(corners);
    cv::Point text_pos = corners[tl_point_idx];
    text_pos.x = std::max(0, text_pos.x);
    text_pos.y = std::max(text_size.height, text_pos.y);
    // Background plate sized to the rendered text (base accounts for descenders)
    const cv::Point plate_tl = text_pos + cv::Point{0, base};
    const cv::Point plate_br = text_pos + cv::Point{text_size.width, -text_size.height};
    cv::rectangle(m, plate_tl, plate_br, CV_RGB(50, 205, 50), cv::FILLED);
    cv::putText(m, str, text_pos, fface, scale, CV_RGB(255, 255, 255), thick, 8);
}
} // anonymous namespace
} // namespace vis
// Entry point: builds and runs a two-stage G-API streaming pipeline
// (text detection -> per-label crop -> text recognition -> CTC decode),
// then visualizes rotated boxes and recognized strings on every frame.
// Fix: removed the unused `tmp_points` local from the processing loop.
int main(int argc, char *argv[])
{
    cv::CommandLineParser cmd(argc, argv, keys);
    cmd.about(about);
    if (cmd.has("help")) {
        cmd.printMessage();
        return 0;
    }
    // Command-line parameters: models/devices for the two networks, CTC
    // decoder beam width & confidence threshold, recognizer alphabet
    const auto input_file_name = cmd.get<std::string>("input");
    const auto tdet_model_path = cmd.get<std::string>("tdm");
    const auto trec_model_path = cmd.get<std::string>("trm");
    const auto tdet_target_dev = cmd.get<std::string>("tdd");
    const auto trec_target_dev = cmd.get<std::string>("trd");
    const auto ctc_beam_dec_bw = cmd.get<int>("bw");
    const auto dec_conf_thresh = cmd.get<double>("thr");
    const auto pad_symbol = '#';
    // The pad symbol is appended to the user-provided symbol set and is
    // what the CTC decoders treat as padding
    const auto symbol_set = cmd.get<std::string>("sset") + pad_symbol;
    // Graph construction: inputs are the frame and the recognizer's
    // expected input size (a run-time parameter of the graph)
    cv::GMat in;
    cv::GOpaque<cv::Size> in_rec_sz;
    cv::GMat link, segm;
    std::tie(link, segm) = cv::gapi::infer<custom::TextDetection>(in);
    cv::GOpaque<cv::Size> size = cv::gapi::streaming::size(in);
    // 0.8f/0.8f are the link & segmentation confidence thresholds
    cv::GArray<cv::RotatedRect> rrs = custom::PostProcess::on(link, segm, size, 0.8f, 0.8f);
    cv::GArray<cv::GMat> labels = custom::CropLabels::on(in, rrs, in_rec_sz);
    // infer2 runs recognition once per cropped label blob
    cv::GArray<cv::GMat> text = cv::gapi::infer2<custom::TextRecognition>(in, labels);
    cv::GComputation graph(cv::GIn(in, in_rec_sz),
                           cv::GOut(cv::gapi::copy(in), rrs, text));
    // Text detection network
    auto tdet_net = cv::gapi::ie::Params<custom::TextDetection> {
        tdet_model_path, // path to topology IR
        weights_path(tdet_model_path), // path to weights
        tdet_target_dev, // device specifier
    }.cfgOutputLayers({"model/link_logits_/add", "model/segm_logits/add"});
    // Text recognition network
    auto trec_net = cv::gapi::ie::Params<custom::TextRecognition> {
        trec_model_path, // path to topology IR
        weights_path(trec_model_path), // path to weights
        trec_target_dev, // device specifier
    };
    auto networks = cv::gapi::networks(tdet_net, trec_net);
    // CPU (OpenCV) implementations of the custom kernels
    auto kernels = cv::gapi::kernels< custom::OCVPostProcess
                                    , custom::OCVCropLabels
                                    >();
    auto pipeline = graph.compileStreaming(cv::compile_args(kernels, networks));
    std::cout << "Reading " << input_file_name << std::endl;
    // Input stream
    auto in_src = cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input_file_name);
    // Text recognition input size (also an input parameter to the graph)
    auto in_rsz = cv::Size{ 120, 32 };
    // Set the pipeline source & start the pipeline
    pipeline.setSource(cv::gin(in_src, in_rsz));
    pipeline.start();
    // Declare the output data & run the processing loop
    cv::TickMeter tm;
    cv::Mat image;
    std::vector<cv::RotatedRect> out_rcs;
    std::vector<cv::Mat> out_text;
    tm.start();
    int frames = 0;
    while (pipeline.pull(cv::gout(image, out_rcs, out_text))) {
        frames++;
        CV_Assert(out_rcs.size() == out_text.size());
        const auto num_labels = out_rcs.size();
        for (std::size_t l = 0; l < num_labels; l++) {
            // Decode the recognized text in the rectangle
            const auto &blob = out_text[l];
            const float *data = blob.ptr<float>();
            const auto sz = blob.total();
            double conf = 1.0;
            // Beam width 0 selects the (cheaper) greedy decoder
            const std::string res = ctc_beam_dec_bw == 0
                ? CTCGreedyDecoder(data, sz, symbol_set, pad_symbol, &conf)
                : CTCBeamSearchDecoder(data, sz, symbol_set, &conf, ctc_beam_dec_bw);
            // Draw a bounding box for this rotated rectangle
            const auto &rc = out_rcs[l];
            vis::drawRotatedRect(image, rc);
            // Draw text, if decoded
            if (conf >= dec_conf_thresh) {
                vis::drawText(image, rc, res);
            }
        }
        // Exclude visualization/waitKey time from the FPS measurement
        tm.stop();
        cv::imshow("Out", image);
        cv::waitKey(1);
        tm.start();
    }
    tm.stop();
    std::cout << "Processed " << frames << " frames"
              << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
    return 0;
}

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#include "precomp.hpp"
@ -67,6 +67,21 @@ cv::gapi::GKernelPackage cv::gapi::GBackend::Priv::auxiliaryKernels() const
return {};
}
// Whether this backend wants to drive the island fusion process itself.
// By default, backends don't control the merge, so the base declines.
bool cv::gapi::GBackend::Priv::controlsMerge() const
{
    return false;
}
// Default merge arbiter: asks whether two islands connected via a data
// slot may be fused. Only meaningful for backends which override
// controlsMerge() to return true (asserted below); by default every
// merge is allowed.
bool cv::gapi::GBackend::Priv::allowsMerge(const cv::gimpl::GIslandModel::Graph &,
                                           const ade::NodeHandle &,
                                           const ade::NodeHandle &,
                                           const ade::NodeHandle &) const
{
    GAPI_Assert(controlsMerge());
    return true;
}
// GBackend public implementation //////////////////////////////////////////////
cv::gapi::GBackend::GBackend()
{
@ -103,38 +118,42 @@ namespace cv {
namespace gimpl {
namespace magazine {
// FIXME implement the below functions with visit()?
namespace {
// Utility function, used in both bindInArg and bindOutArg,
// implements default RMat bind behaviour (if backend doesn't handle RMats in specific way):
// view + wrapped cv::Mat are placed into the magazine
void bindRMat(Mag& mag, const RcDesc& rc, const cv::RMat& rmat, RMat::Access a)
{
auto& matv = mag.template slot<RMat::View>()[rc.id];
matv = rmat.access(a);
mag.template slot<cv::Mat>()[rc.id] = asMat(matv);
}
} // anonymous namespace
void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat)
// FIXME implement the below functions with visit()?
void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handleRMat)
{
switch (rc.shape)
{
case GShape::GMAT:
{
switch (arg.index())
{
case GRunArg::index_of<cv::Mat>() :
if (is_umat)
{
// In case of handleRMat == SKIP
// We assume that backend can work with some device-specific RMats
// and will handle them in some specific way, so just return
if (handleRMat == HandleRMat::SKIP) return;
GAPI_Assert(arg.index() == GRunArg::index_of<cv::RMat>());
bindRMat(mag, rc, util::get<cv::RMat>(arg), RMat::Access::R);
// FIXME: Here meta may^WWILL be copied multiple times!
// Replace it is reference-counted object?
mag.meta<cv::RMat>()[rc.id] = arg.meta;
mag.meta<cv::Mat>()[rc.id] = arg.meta;
#if !defined(GAPI_STANDALONE)
auto& mag_umat = mag.template slot<cv::UMat>()[rc.id];
mag_umat = util::get<cv::Mat>(arg).getUMat(ACCESS_READ);
#else
util::throw_error(std::logic_error("UMat is not supported in standalone build"));
#endif // !defined(GAPI_STANDALONE)
}
else
{
auto& mag_mat = mag.template slot<cv::Mat>()[rc.id];
mag_mat = util::get<cv::Mat>(arg);
}
break;
default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?"));
}
mag.meta<cv::UMat>()[rc.id] = arg.meta;
#endif
break;
}
case GShape::GSCALAR:
{
auto& mag_scalar = mag.template slot<cv::Scalar>()[rc.id];
@ -143,15 +162,23 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat)
case GRunArg::index_of<cv::Scalar>() : mag_scalar = util::get<cv::Scalar>(arg); break;
default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?"));
}
mag.meta<cv::Scalar>()[rc.id] = arg.meta;
break;
}
case GShape::GARRAY:
mag.template slot<cv::detail::VectorRef>()[rc.id] = util::get<cv::detail::VectorRef>(arg);
mag.slot<cv::detail::VectorRef>()[rc.id] = util::get<cv::detail::VectorRef>(arg);
mag.meta<cv::detail::VectorRef>()[rc.id] = arg.meta;
break;
case GShape::GOPAQUE:
mag.template slot<cv::detail::OpaqueRef>()[rc.id] = util::get<cv::detail::OpaqueRef>(arg);
mag.slot<cv::detail::OpaqueRef>()[rc.id] = util::get<cv::detail::OpaqueRef>(arg);
mag.meta<cv::detail::OpaqueRef>()[rc.id] = arg.meta;
break;
case GShape::GFRAME:
mag.slot<cv::MediaFrame>()[rc.id] = util::get<cv::MediaFrame>(arg);
mag.meta<cv::MediaFrame>()[rc.id] = arg.meta;
break;
default:
@ -159,32 +186,18 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat)
}
}
void bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, bool is_umat)
void bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, HandleRMat handleRMat)
{
switch (rc.shape)
{
case GShape::GMAT:
{
switch (arg.index())
{
case GRunArgP::index_of<cv::Mat*>() :
if (is_umat)
{
#if !defined(GAPI_STANDALONE)
auto& mag_umat = mag.template slot<cv::UMat>()[rc.id];
mag_umat = util::get<cv::Mat*>(arg)->getUMat(ACCESS_RW);
#else
util::throw_error(std::logic_error("UMat is not supported in standalone build"));
#endif // !defined(GAPI_STANDALONE)
}
else
{
auto& mag_mat = mag.template slot<cv::Mat>()[rc.id];
mag_mat = *util::get<cv::Mat*>(arg);
}
break;
default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?"));
}
// In case of handleRMat == SKIP
// We assume that backend can work with some device-specific RMats
// and will handle them in some specific way, so just return
if (handleRMat == HandleRMat::SKIP) return;
GAPI_Assert(arg.index() == GRunArgP::index_of<cv::RMat*>());
bindRMat(mag, rc, *util::get<cv::RMat*>(arg), RMat::Access::W);
break;
}
@ -198,6 +211,9 @@ void bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, bool is_umat)
}
break;
}
case GShape::GFRAME:
mag.template slot<cv::MediaFrame>()[rc.id] = *util::get<cv::MediaFrame*>(arg);
break;
case GShape::GARRAY:
mag.template slot<cv::detail::VectorRef>()[rc.id] = util::get<cv::detail::VectorRef>(arg);
break;
@ -234,6 +250,7 @@ void resetInternalData(Mag& mag, const Data &d)
break;
case GShape::GMAT:
case GShape::GFRAME:
// Do nothing here - FIXME unify with initInternalData?
break;
@ -248,12 +265,23 @@ cv::GRunArg getArg(const Mag& mag, const RcDesc &ref)
// Wrap associated CPU object (either host or an internal one)
switch (ref.shape)
{
case GShape::GMAT: return GRunArg(mag.template slot<cv::Mat>().at(ref.id));
case GShape::GSCALAR: return GRunArg(mag.template slot<cv::Scalar>().at(ref.id));
case GShape::GMAT:
return GRunArg(mag.slot<cv::RMat>().at(ref.id),
mag.meta<cv::RMat>().at(ref.id));
case GShape::GSCALAR:
return GRunArg(mag.slot<cv::Scalar>().at(ref.id),
mag.meta<cv::Scalar>().at(ref.id));
// Note: .at() is intentional for GArray and GOpaque as objects MUST be already there
// (and constructed by either bindIn/Out or resetInternal)
case GShape::GARRAY: return GRunArg(mag.template slot<cv::detail::VectorRef>().at(ref.id));
case GShape::GOPAQUE: return GRunArg(mag.template slot<cv::detail::OpaqueRef>().at(ref.id));
case GShape::GARRAY:
return GRunArg(mag.slot<cv::detail::VectorRef>().at(ref.id),
mag.meta<cv::detail::VectorRef>().at(ref.id));
case GShape::GOPAQUE:
return GRunArg(mag.slot<cv::detail::OpaqueRef>().at(ref.id),
mag.meta<cv::detail::OpaqueRef>().at(ref.id));
case GShape::GFRAME:
return GRunArg(mag.slot<cv::MediaFrame>().at(ref.id),
mag.meta<cv::MediaFrame>().at(ref.id));
default:
util::throw_error(std::logic_error("Unsupported GShape type"));
break;
@ -294,52 +322,24 @@ cv::GRunArgP getObjPtr(Mag& mag, const RcDesc &rc, bool is_umat)
// debugging this!!!1
return GRunArgP(const_cast<const Mag&>(mag)
.template slot<cv::detail::OpaqueRef>().at(rc.id));
case GShape::GFRAME:
return GRunArgP(&mag.template slot<cv::MediaFrame>()[rc.id]);
default:
util::throw_error(std::logic_error("Unsupported GShape type"));
break;
}
}
void writeBack(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg, bool is_umat)
void writeBack(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg)
{
switch (rc.shape)
{
case GShape::GARRAY:
// Do nothing - should we really do anything here?
break;
case GShape::GOPAQUE:
// Do nothing - should we really do anything here?
break;
case GShape::GMAT:
{
//simply check that memory was not reallocated, i.e.
//both instances of Mat pointing to the same memory
uchar* out_arg_data = nullptr;
switch (g_arg.index())
{
case GRunArgP::index_of<cv::Mat*>() : out_arg_data = util::get<cv::Mat*>(g_arg)->data; break;
#if !defined(GAPI_STANDALONE)
case GRunArgP::index_of<cv::UMat*>() : out_arg_data = (util::get<cv::UMat*>(g_arg))->getMat(ACCESS_RW).data; break;
#endif // !defined(GAPI_STANDALONE)
default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?"));
}
if (is_umat)
{
#if !defined(GAPI_STANDALONE)
auto& in_mag = mag.template slot<cv::UMat>().at(rc.id);
GAPI_Assert((out_arg_data == (in_mag.getMat(ACCESS_RW).data)) && " data for output parameters was reallocated ?");
#else
util::throw_error(std::logic_error("UMat is not supported in standalone build"));
#endif // !defined(GAPI_STANDALONE)
}
else
{
auto& in_mag = mag.template slot<cv::Mat>().at(rc.id);
GAPI_Assert((out_arg_data == in_mag.data) && " data for output parameters was reallocated ?");
}
case GShape::GOPAQUE:
// Do nothing - should we really do anything here?
break;
}
case GShape::GSCALAR:
{
@ -351,12 +351,50 @@ void writeBack(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg, bool is_umat)
break;
}
case GShape::GFRAME:
{
*util::get<cv::MediaFrame*>(g_arg) = mag.template slot<cv::MediaFrame>().at(rc.id);
break;
}
default:
util::throw_error(std::logic_error("Unsupported GShape type"));
break;
}
}
void unbind(Mag& mag, const RcDesc &rc)
{
switch (rc.shape)
{
case GShape::GARRAY:
case GShape::GOPAQUE:
case GShape::GSCALAR:
// TODO: Do nothing - should we really do anything here?
break;
case GShape::GMAT:
// Clean-up everything - a cv::Mat, cv::RMat::View, a cv::UMat, and cv::RMat
// if applicable
mag.slot<cv::Mat>().erase(rc.id);
#if !defined(GAPI_STANDALONE)
mag.slot<cv::UMat>().erase(rc.id);
#endif
mag.slot<cv::RMat::View>().erase(rc.id);
mag.slot<cv::RMat>().erase(rc.id);
break;
case GShape::GFRAME:
// MediaFrame can also be associated with external memory,
// so requires a special handling here.
mag.slot<cv::MediaFrame>().erase(rc.id);
break;
default:
GAPI_Assert(false);
}
}
} // namespace magazine
void createMat(const cv::GMatDesc &desc, cv::Mat& mat)

View File

@ -19,7 +19,7 @@
#include "opencv2/gapi/gkernel.hpp"
#include "compiler/gmodel.hpp"
#include "compiler/gislandmodel.hpp"
namespace cv
{
@ -68,6 +68,22 @@ public:
virtual cv::gapi::GKernelPackage auxiliaryKernels() const;
// Ask backend if it has a custom control over island fusion process
// This method is quite redundant but there's nothing better fits
// the current fusion process. By default, [existing] backends don't
// control the merge.
// FIXME: Refactor to a single entity?
virtual bool controlsMerge() const;
// Ask backend if it is ok to merge these two islands connected
// via a data slot. By default, [existing] backends allow to merge everything.
// FIXME: Refactor to a single entity?
// FIXME: Strip down the type details form graph? (make it ade::Graph?)
virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &g,
const ade::NodeHandle &a_nh,
const ade::NodeHandle &slot_nh,
const ade::NodeHandle &b_nh) const;
virtual ~Priv() = default;
};

View File

@ -69,6 +69,11 @@ cv::detail::GOpaqueU cv::GCall::yieldOpaque(int output)
return cv::detail::GOpaqueU(m_priv->m_node, output);
}
cv::GFrame cv::GCall::yieldFrame(int output)
{
return cv::GFrame(m_priv->m_node, output);
}
cv::GCall::Priv& cv::GCall::priv()
{
return *m_priv;
@ -78,3 +83,13 @@ const cv::GCall::Priv& cv::GCall::priv() const
{
return *m_priv;
}
cv::GKernel& cv::GCall::kernel()
{
return m_priv->m_k;
}
cv::util::any& cv::GCall::params()
{
return m_priv->m_params;
}

View File

@ -42,10 +42,11 @@ class GCall::Priv
{
public:
std::vector<GArg> m_args;
const GKernel m_k;
GKernel m_k;
// TODO: Rename to "constructionNode" or smt to reflect its lifetime
GNode m_node;
cv::util::any m_params;
explicit Priv(const GKernel &k);
};

View File

@ -9,6 +9,7 @@
#include <algorithm> // remove_if
#include <cctype> // isspace (non-locale version)
#include <ade/util/algorithm.hpp>
#include <ade/util/zip_range.hpp> // util::indexed
#include "logger.hpp" // GAPI_LOG
@ -21,6 +22,7 @@
#include "compiler/gmodelbuilder.hpp"
#include "compiler/gcompiler.hpp"
#include "compiler/gcompiled_priv.hpp"
// cv::GComputation private implementation /////////////////////////////////////
// <none>
@ -73,18 +75,18 @@ cv::GComputation::GComputation(cv::GProtoInputArgs &&ins,
};
}
cv::GComputation::GComputation(cv::gimpl::s11n::I::IStream &is)
cv::GComputation::GComputation(cv::gapi::s11n::IIStream &is)
: m_priv(new Priv())
{
m_priv->m_shape = gimpl::s11n::deserialize(is);
m_priv->m_shape = gapi::s11n::deserialize(is);
}
void cv::GComputation::serialize(cv::gimpl::s11n::I::OStream &os) const
void cv::GComputation::serialize(cv::gapi::s11n::IOStream &os) const
{
// Build a basic GModel and write the whole thing to the stream
auto pG = cv::gimpl::GCompiler::makeGraph(*m_priv);
std::vector<ade::NodeHandle> nhs(pG->nodes().begin(), pG->nodes().end());
gimpl::s11n::serialize(os, *pG, nhs);
gapi::s11n::serialize(os, *pG, nhs);
}
@ -129,15 +131,14 @@ static bool formats_are_same(const cv::GMetaArgs& metas1, const cv::GMetaArgs& m
});
}
void cv::GComputation::apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args)
void cv::GComputation::recompile(GMetaArgs&& in_metas, GCompileArgs &&args)
{
const auto in_metas = descr_of(ins);
// FIXME Graph should be recompiled when GCompileArgs have changed
if (m_priv->m_lastMetas != in_metas)
{
if (m_priv->m_lastCompiled &&
m_priv->m_lastCompiled.canReshape() &&
formats_are_same(m_priv->m_lastMetas, in_metas))
m_priv->m_lastCompiled.canReshape() &&
formats_are_same(m_priv->m_lastMetas, in_metas))
{
m_priv->m_lastCompiled.reshape(in_metas, args);
}
@ -148,6 +149,11 @@ void cv::GComputation::apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&ar
}
m_priv->m_lastMetas = in_metas;
}
}
void cv::GComputation::apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args)
{
recompile(descr_of(ins), std::move(args));
m_priv->m_lastCompiled(std::move(ins), std::move(outs));
}
@ -165,6 +171,55 @@ void cv::GComputation::apply(const std::vector<cv::Mat> &ins,
apply(std::move(call_ins), std::move(call_outs), std::move(args));
}
// NB: This overload is called from python code
cv::GRunArgs cv::GComputation::apply(GRunArgs &&ins, GCompileArgs &&args)
{
recompile(descr_of(ins), std::move(args));
const auto& out_info = m_priv->m_lastCompiled.priv().outInfo();
GRunArgs run_args;
GRunArgsP outs;
run_args.reserve(out_info.size());
outs.reserve(out_info.size());
for (auto&& info : out_info)
{
switch (info.shape)
{
case cv::GShape::GMAT:
{
run_args.emplace_back(cv::Mat{});
outs.emplace_back(&cv::util::get<cv::Mat>(run_args.back()));
break;
}
case cv::GShape::GSCALAR:
{
run_args.emplace_back(cv::Scalar{});
outs.emplace_back(&cv::util::get<cv::Scalar>(run_args.back()));
break;
}
case cv::GShape::GARRAY:
{
switch (info.kind)
{
case cv::detail::OpaqueKind::CV_POINT2F:
run_args.emplace_back(cv::detail::VectorRef{std::vector<cv::Point2f>{}});
outs.emplace_back(cv::util::get<cv::detail::VectorRef>(run_args.back()));
break;
default:
util::throw_error(std::logic_error("Unsupported kind for GArray"));
}
break;
}
default:
util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output"));
}
}
m_priv->m_lastCompiled(std::move(ins), std::move(outs));
return run_args;
}
#if !defined(GAPI_STANDALONE)
void cv::GComputation::apply(cv::Mat in, cv::Mat &out, GCompileArgs &&args)
{

View File

@ -29,7 +29,7 @@ public:
cv::GProtoArgs m_outs;
};
using Dump = cv::gimpl::s11n::GSerialized;
using Dump = cv::gapi::s11n::GSerialized;
using Shape = cv::util::variant
< Expr // An expression-based graph

View File

@ -8,21 +8,17 @@
#include "precomp.hpp"
#include <opencv2/gapi/gframe.hpp>
#include <opencv2/gapi/media.hpp>
#include "api/gorigin.hpp"
// cv::GFrame public implementation //////////////////////////////////////////////
cv::GFrame::GFrame()
: m_priv(new GOrigin(GShape::GMAT, GNode::Param())) {
// N.B.: The shape here is still GMAT as currently cv::Mat is used
// as an underlying host type. Will be changed to GFRAME once
// GExecutor & GStreamingExecutor & selected backends will be extended
// to support cv::MediaFrame.
: m_priv(new GOrigin(GShape::GFRAME, GNode::Param())) {
}
cv::GFrame::GFrame(const GNode &n, std::size_t out)
: m_priv(new GOrigin(GShape::GMAT, n, out)) {
// N.B.: GMAT is here for the same reason as above ^
: m_priv(new GOrigin(GShape::GFRAME, n, out)) {
}
cv::GOrigin& cv::GFrame::priv() {
@ -34,7 +30,23 @@ const cv::GOrigin& cv::GFrame::priv() const {
}
namespace cv {
std::ostream& operator<<(std::ostream& os, const cv::GFrameDesc &) {
bool GFrameDesc::operator== (const GFrameDesc &rhs) const {
return fmt == rhs.fmt && size == rhs.size;
}
GFrameDesc descr_of(const cv::MediaFrame &frame) {
return frame.desc();
}
std::ostream& operator<<(std::ostream& os, const cv::GFrameDesc &d) {
os << '[';
switch (d.fmt) {
case MediaFormat::BGR: os << "BGR"; break;
case MediaFormat::NV12: os << "NV12"; break;
default: GAPI_Assert(false && "Invalid media format");
}
os << ' ' << d.size << ']';
return os;
}

View File

@ -16,8 +16,8 @@
#include <opencv2/gapi/infer.hpp>
cv::gapi::GNetPackage::GNetPackage(std::initializer_list<GNetParam> &&ii)
: networks(std::move(ii)) {
cv::gapi::GNetPackage::GNetPackage(std::initializer_list<GNetParam> ii)
: networks(ii) {
}
std::vector<cv::gapi::GBackend> cv::gapi::GNetPackage::backends() const {
@ -25,3 +25,59 @@ std::vector<cv::gapi::GBackend> cv::gapi::GNetPackage::backends() const {
for (const auto &nn : networks) unique_set.insert(nn.backend);
return std::vector<cv::gapi::GBackend>(unique_set.begin(), unique_set.end());
}
// FIXME: Inference API is currently only available in full mode
#if !defined(GAPI_STANDALONE)
cv::GInferInputs::GInferInputs()
: in_blobs(std::make_shared<Map>())
{
}
cv::GMat& cv::GInferInputs::operator[](const std::string& name) {
return (*in_blobs)[name];
}
const cv::GInferInputs::Map& cv::GInferInputs::getBlobs() const {
return *in_blobs;
}
void cv::GInferInputs::setInput(const std::string& name, const cv::GMat& value) {
in_blobs->emplace(name, value);
}
struct cv::GInferOutputs::Priv
{
Priv(std::shared_ptr<cv::GCall>);
std::shared_ptr<cv::GCall> call;
InOutInfo* info = nullptr;
std::unordered_map<std::string, cv::GMat> out_blobs;
};
cv::GInferOutputs::Priv::Priv(std::shared_ptr<cv::GCall> c)
: call(std::move(c)), info(cv::util::any_cast<InOutInfo>(&call->params()))
{
}
cv::GInferOutputs::GInferOutputs(std::shared_ptr<cv::GCall> call)
: m_priv(std::make_shared<cv::GInferOutputs::Priv>(std::move(call)))
{
}
cv::GMat cv::GInferOutputs::at(const std::string& name)
{
auto it = m_priv->out_blobs.find(name);
if (it == m_priv->out_blobs.end()) {
// FIXME: Avoid modifying GKernel
// Expect output to be always GMat
m_priv->call->kernel().outShapes.push_back(cv::GShape::GMAT);
// ...so _empty_ constructor is passed here.
m_priv->call->kernel().outCtors.emplace_back(cv::util::monostate{});
int out_idx = static_cast<int>(m_priv->out_blobs.size());
it = m_priv->out_blobs.emplace(name, m_priv->call->yield(out_idx)).first;
m_priv->info->out_names.push_back(name);
}
return it->second;
}
#endif // GAPI_STANDALONE

View File

@ -36,6 +36,38 @@ const cv::GOrigin& cv::GMat::priv() const
return *m_priv;
}
static std::vector<int> checkVectorImpl(const int width, const int height, const int chan,
const int n)
{
if (width == 1 && (n == -1 || n == chan))
{
return {height, chan};
}
else if (height == 1 && (n == -1 || n == chan))
{
return {width, chan};
}
else if (chan == 1 && (n == -1 || n == width))
{
return {height, width};
}
else // input Mat can't be described as vector of points of given dimensionality
{
return {-1, -1};
}
}
int cv::gapi::detail::checkVector(const cv::GMatDesc& in, const size_t n)
{
GAPI_Assert(n != 0u);
return checkVectorImpl(in.size.width, in.size.height, in.chan, static_cast<int>(n))[0];
}
std::vector<int> cv::gapi::detail::checkVector(const cv::GMatDesc& in)
{
return checkVectorImpl(in.size.width, in.size.height, in.chan, -1);
}
namespace{
template <typename T> cv::GMetaArgs vec_descr_of(const std::vector<T> &vec)
{
@ -95,6 +127,11 @@ cv::GMetaArgs cv::gapi::own::descrs_of(const std::vector<Mat> &vec)
return vec_descr_of(vec);
}
cv::GMatDesc cv::descr_of(const cv::RMat &mat)
{
return mat.desc();
}
namespace cv {
std::ostream& operator<<(std::ostream& os, const cv::GMatDesc &desc)
{
@ -137,4 +174,9 @@ bool GMatDesc::canDescribe(const cv::Mat& mat) const
return canDescribeHelper(*this, mat);
}
bool GMatDesc::canDescribe(const cv::RMat& mat) const
{
return canDescribeHelper(*this, mat);
}
}// namespace cv

View File

@ -119,6 +119,12 @@ cv::GMetaArg cv::descr_of(const cv::GRunArg &arg)
case GRunArg::index_of<cv::gapi::wip::IStreamSource::Ptr>():
return cv::util::get<cv::gapi::wip::IStreamSource::Ptr>(arg)->descr_of();
case GRunArg::index_of<cv::RMat>():
return cv::GMetaArg(cv::util::get<cv::RMat>(arg).desc());
case GRunArg::index_of<cv::MediaFrame>():
return cv::GMetaArg(cv::util::get<cv::MediaFrame>(arg).desc());
default: util::throw_error(std::logic_error("Unsupported GRunArg type"));
}
}
@ -130,6 +136,7 @@ cv::GMetaArgs cv::descr_of(const cv::GRunArgs &args)
return metas;
}
// FIXME: Is it tested for all types?
cv::GMetaArg cv::descr_of(const cv::GRunArgP &argp)
{
switch (argp.index())
@ -139,12 +146,14 @@ cv::GMetaArg cv::descr_of(const cv::GRunArgP &argp)
#endif // !defined(GAPI_STANDALONE)
case GRunArgP::index_of<cv::Mat*>(): return GMetaArg(cv::descr_of(*util::get<cv::Mat*>(argp)));
case GRunArgP::index_of<cv::Scalar*>(): return GMetaArg(descr_of(*util::get<cv::Scalar*>(argp)));
case GRunArgP::index_of<cv::MediaFrame*>(): return GMetaArg(descr_of(*util::get<cv::MediaFrame*>(argp)));
case GRunArgP::index_of<cv::detail::VectorRef>(): return GMetaArg(util::get<cv::detail::VectorRef>(argp).descr_of());
case GRunArgP::index_of<cv::detail::OpaqueRef>(): return GMetaArg(util::get<cv::detail::OpaqueRef>(argp).descr_of());
default: util::throw_error(std::logic_error("Unsupported GRunArgP type"));
}
}
// FIXME: Is it tested for all types??
bool cv::can_describe(const GMetaArg& meta, const GRunArgP& argp)
{
switch (argp.index())
@ -155,12 +164,14 @@ bool cv::can_describe(const GMetaArg& meta, const GRunArgP& argp)
case GRunArgP::index_of<cv::Mat*>(): return util::holds_alternative<GMatDesc>(meta) &&
util::get<GMatDesc>(meta).canDescribe(*util::get<cv::Mat*>(argp));
case GRunArgP::index_of<cv::Scalar*>(): return meta == GMetaArg(cv::descr_of(*util::get<cv::Scalar*>(argp)));
case GRunArgP::index_of<cv::MediaFrame*>(): return meta == GMetaArg(cv::descr_of(*util::get<cv::MediaFrame*>(argp)));
case GRunArgP::index_of<cv::detail::VectorRef>(): return meta == GMetaArg(util::get<cv::detail::VectorRef>(argp).descr_of());
case GRunArgP::index_of<cv::detail::OpaqueRef>(): return meta == GMetaArg(util::get<cv::detail::OpaqueRef>(argp).descr_of());
default: util::throw_error(std::logic_error("Unsupported GRunArgP type"));
}
}
// FIXME: Is it tested for all types??
bool cv::can_describe(const GMetaArg& meta, const GRunArg& arg)
{
switch (arg.index())
@ -174,6 +185,9 @@ bool cv::can_describe(const GMetaArg& meta, const GRunArg& arg)
case GRunArg::index_of<cv::detail::VectorRef>(): return meta == cv::GMetaArg(util::get<cv::detail::VectorRef>(arg).descr_of());
case GRunArg::index_of<cv::detail::OpaqueRef>(): return meta == cv::GMetaArg(util::get<cv::detail::OpaqueRef>(arg).descr_of());
case GRunArg::index_of<cv::gapi::wip::IStreamSource::Ptr>(): return util::holds_alternative<GMatDesc>(meta); // FIXME(?) may be not the best option
case GRunArg::index_of<cv::RMat>(): return util::holds_alternative<GMatDesc>(meta) &&
util::get<GMatDesc>(meta).canDescribe(cv::util::get<cv::RMat>(arg));
case GRunArg::index_of<cv::MediaFrame>(): return meta == cv::GMetaArg(util::get<cv::MediaFrame>(arg).desc());
default: util::throw_error(std::logic_error("Unsupported GRunArg type"));
}
}
@ -187,6 +201,8 @@ bool cv::can_describe(const GMetaArgs &metas, const GRunArgs &args)
});
}
// FIXME: Is it tested for all types?
// FIXME: Where does this validation happen??
void cv::validate_input_arg(const GRunArg& arg)
{
// FIXME: It checks only Mat argument
@ -243,6 +259,11 @@ std::ostream& operator<<(std::ostream& os, const cv::GMetaArg &arg)
case cv::GMetaArg::index_of<cv::GOpaqueDesc>():
os << util::get<cv::GOpaqueDesc>(arg);
break;
case cv::GMetaArg::index_of<cv::GFrameDesc>():
os << util::get<cv::GFrameDesc>(arg);
break;
default:
GAPI_Assert(false);
}
@ -263,10 +284,14 @@ const void* cv::gimpl::proto::ptr(const GRunArgP &arg)
return static_cast<const void*>(cv::util::get<cv::Mat*>(arg));
case GRunArgP::index_of<cv::Scalar*>():
return static_cast<const void*>(cv::util::get<cv::Scalar*>(arg));
case GRunArgP::index_of<cv::RMat*>():
return static_cast<const void*>(cv::util::get<cv::RMat*>(arg));
case GRunArgP::index_of<cv::detail::VectorRef>():
return cv::util::get<cv::detail::VectorRef>(arg).ptr();
case GRunArgP::index_of<cv::detail::OpaqueRef>():
return cv::util::get<cv::detail::OpaqueRef>(arg).ptr();
case GRunArgP::index_of<cv::MediaFrame*>():
return static_cast<const void*>(cv::util::get<cv::MediaFrame*>(arg));
default:
util::throw_error(std::logic_error("Unknown GRunArgP type!"));
}

View File

@ -0,0 +1,33 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#include "precomp.hpp"
#include <opencv2/gapi/garg.hpp>
// Default constructor: value-initializes the GRunArgBase variant payload
// and leaves the run-time meta map empty.
cv::GRunArg::GRunArg() {
}
// Copy constructor: copies both the payload (via the GRunArgBase
// subobject) and the per-argument run-time meta map.
cv::GRunArg::GRunArg(const cv::GRunArg &arg)
: cv::GRunArgBase(static_cast<const cv::GRunArgBase&>(arg))
, meta(arg.meta) {
}
// Move constructor: moves both the payload and the meta map.
// FIX: the cast target must be NON-const. std::move() applied through a
// const reference yields a const rvalue, which cannot bind to
// GRunArgBase's move constructor and silently selects the COPY
// constructor instead -- the payload was being copied, not moved.
cv::GRunArg::GRunArg(cv::GRunArg &&arg)
: cv::GRunArgBase(std::move(static_cast<cv::GRunArgBase&>(arg)))
, meta(std::move(arg.meta)) {
}
// Copy assignment: delegates the payload copy to GRunArgBase::operator=
// and copies the meta map alongside it.
cv::GRunArg& cv::GRunArg::operator= (const cv::GRunArg &arg) {
cv::GRunArgBase::operator=(static_cast<const cv::GRunArgBase&>(arg));
meta = arg.meta;
return *this;
}
// Move assignment: delegates the payload move to GRunArgBase::operator=
// and moves the meta map.
// FIX: as in the move constructor, the cast must be to a NON-const
// reference -- std::move() on a const reference produces a const rvalue
// that binds to the copy-assignment operator, degrading the move to a
// silent copy.
cv::GRunArg& cv::GRunArg::operator= (cv::GRunArg &&arg) {
cv::GRunArgBase::operator=(std::move(static_cast<cv::GRunArgBase&>(arg)));
meta = std::move(arg.meta);
return *this;
}

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#include "precomp.hpp"
@ -234,6 +234,11 @@ GScalar sum(const GMat& src)
return core::GSum::on(src);
}
// Expression-level wrapper: builds a GCountNonZero operation node on the
// input GMat; the scalar result is represented as a GOpaque<int>.
GOpaque<int> countNonZero(const GMat& src)
{
return core::GCountNonZero::on(src);
}
GMat addWeighted(const GMat& src1, double alpha, const GMat& src2, double beta, double gamma, int dtype)
{
return core::GAddW::on(src1, alpha, src2, beta, gamma, dtype);
@ -323,11 +328,6 @@ GMat crop(const GMat& src, const Rect& rect)
return core::GCrop::on(src, rect);
}
GMat copy(const GMat& src)
{
return core::GCopy::on(src);
}
GMat concatHor(const GMat& src1, const GMat& src2)
{
return core::GConcatHor::on(src1, src2);
@ -383,14 +383,48 @@ GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, int flags,
return core::GWarpAffine::on(src, M, dsize, flags, borderMode, borderValue);
}
GOpaque<Size> size(const GMat& src)
std::tuple<GOpaque<double>,GMat,GMat> kmeans(const GMat& data, const int K, const GMat& bestLabels,
const TermCriteria& criteria, const int attempts,
const KmeansFlags flags)
{
return core::GSize::on(src);
return core::GKMeansND::on(data, K, bestLabels, criteria, attempts, flags);
}
GOpaque<Size> size(const GOpaque<Rect>& r)
std::tuple<GOpaque<double>,GMat,GMat> kmeans(const GMat& data, const int K,
const TermCriteria& criteria, const int attempts,
const KmeansFlags flags)
{
return core::GSizeR::on(r);
return core::GKMeansNDNoInit::on(data, K, criteria, attempts, flags);
}
// kmeans over a 2D point set: forwards to the GKMeans2D operation.
// Returns (compactness, labels, centers) as G-API graph objects.
std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>> kmeans(const GArray<Point2f>& data,
const int K,
const GArray<int>& bestLabels,
const TermCriteria& criteria,
const int attempts,
const KmeansFlags flags)
{
return core::GKMeans2D::on(data, K, bestLabels, criteria, attempts, flags);
}
// kmeans over a 3D point set: same contract, GKMeans3D operation.
std::tuple<GOpaque<double>,GArray<int>,GArray<Point3f>> kmeans(const GArray<Point3f>& data,
const int K,
const GArray<int>& bestLabels,
const TermCriteria& criteria,
const int attempts,
const KmeansFlags flags)
{
return core::GKMeans3D::on(data, K, bestLabels, criteria, attempts, flags);
}
// Streaming accessor: size of a GMat, produced as a GOpaque<Size> node.
GOpaque<Size> streaming::size(const GMat& src)
{
return streaming::GSize::on(src);
}
// Streaming accessor: size of a GOpaque<Rect> (width/height of the rect).
GOpaque<Size> streaming::size(const GOpaque<Rect>& r)
{
return streaming::GSizeR::on(r);
}
} //namespace gapi

View File

@ -73,6 +73,13 @@ GMat dilate3x3(const GMat& src, int iterations,
return dilate(src, cv::Mat(), cv::Point(-1,-1), iterations, borderType, borderValue);
}
// Expression-level wrapper for morphological transformations: builds a
// GMorphologyEx operation node with the given structuring element,
// anchor, iteration count and border handling.
GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel, const Point &anchor,
const int iterations, const BorderTypes borderType, const Scalar &borderValue)
{
return imgproc::GMorphologyEx::on(src, op, kernel, anchor, iterations,
borderType, borderValue);
}
GMat Sobel(const GMat& src, int ddepth, int dx, int dy, int ksize,
double scale, double delta,
int borderType, const Scalar& bordVal)
@ -115,6 +122,101 @@ cv::GArray<cv::Point2f> goodFeaturesToTrack(const GMat& image, int maxCorners, d
useHarrisDetector, k);
}
// findContours overloads: each builds the corresponding G-API operation
// node. The "H" variants additionally produce the contour hierarchy;
// the "NoOffset" operations are used when no point offset is supplied.
GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset)
{
return imgproc::GFindContours::on(src, mode, method, offset);
}
GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method)
{
return imgproc::GFindContoursNoOffset::on(src, mode, method);
}
// With-hierarchy variant: returns (contours, hierarchy).
std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset)
{
return imgproc::GFindContoursH::on(src, mode, method, offset);
}
std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method)
{
return imgproc::GFindContoursHNoOffset::on(src, mode, method);
}
// boundingRect overloads: one operation per supported input kind
// (matrix, 32S point vector, 32F point vector), each producing a
// GOpaque<Rect> node.
GOpaque<Rect> boundingRect(const GMat& src)
{
return imgproc::GBoundingRectMat::on(src);
}
GOpaque<Rect> boundingRect(const GArray<Point2i>& src)
{
return imgproc::GBoundingRectVector32S::on(src);
}
GOpaque<Rect> boundingRect(const GArray<Point2f>& src)
{
return imgproc::GBoundingRectVector32F::on(src);
}
// fitLine2D/fitLine3D overloads: thin wrappers dispatching to the
// per-input-type G-API operations (Mat, 32S/32F/64F point vectors).
// 2D results are Vec4f (vx, vy, x0, y0); 3D results are Vec6f.
GOpaque<Vec4f> fitLine2D(const GMat& src, const DistanceTypes distType, const double param,
const double reps, const double aeps)
{
return imgproc::GFitLine2DMat::on(src, distType, param, reps, aeps);
}
GOpaque<Vec4f> fitLine2D(const GArray<Point2i>& src, const DistanceTypes distType,
const double param, const double reps, const double aeps)
{
return imgproc::GFitLine2DVector32S::on(src, distType, param, reps, aeps);
}
GOpaque<Vec4f> fitLine2D(const GArray<Point2f>& src, const DistanceTypes distType,
const double param, const double reps, const double aeps)
{
return imgproc::GFitLine2DVector32F::on(src, distType, param, reps, aeps);
}
GOpaque<Vec4f> fitLine2D(const GArray<Point2d>& src, const DistanceTypes distType,
const double param, const double reps, const double aeps)
{
return imgproc::GFitLine2DVector64F::on(src, distType, param, reps, aeps);
}
GOpaque<Vec6f> fitLine3D(const GMat& src, const DistanceTypes distType, const double param,
const double reps, const double aeps)
{
return imgproc::GFitLine3DMat::on(src, distType, param, reps, aeps);
}
GOpaque<Vec6f> fitLine3D(const GArray<Point3i>& src, const DistanceTypes distType,
const double param, const double reps, const double aeps)
{
return imgproc::GFitLine3DVector32S::on(src, distType, param, reps, aeps);
}
GOpaque<Vec6f> fitLine3D(const GArray<Point3f>& src, const DistanceTypes distType,
const double param, const double reps, const double aeps)
{
return imgproc::GFitLine3DVector32F::on(src, distType, param, reps, aeps);
}
GOpaque<Vec6f> fitLine3D(const GArray<Point3d>& src, const DistanceTypes distType,
const double param, const double reps, const double aeps)
{
return imgproc::GFitLine3DVector64F::on(src, distType, param, reps, aeps);
}
// Channel-swap color conversion node (BGR -> RGB).
GMat BGR2RGB(const GMat& src)
{
return imgproc::GBGR2RGB::on(src);
}
GMat RGB2Gray(const GMat& src)
{
return imgproc::GRGB2Gray::on(src);
@ -160,6 +262,26 @@ GMat YUV2RGB(const GMat& src)
return imgproc::GYUV2RGB::on(src);
}
// I420 color-conversion wrappers: each builds the corresponding
// G-API operation node for packed BGR/RGB <-> planar I420.
GMat BGR2I420(const GMat& src)
{
return imgproc::GBGR2I420::on(src);
}
GMat RGB2I420(const GMat& src)
{
return imgproc::GRGB2I420::on(src);
}
GMat I4202BGR(const GMat& src)
{
return imgproc::GI4202BGR::on(src);
}
GMat I4202RGB(const GMat& src)
{
return imgproc::GI4202RGB::on(src);
}
GMat NV12toRGB(const GMat& src_y, const GMat& src_uv)
{
return imgproc::GNV12toRGB::on(src_y, src_uv);

View File

@ -0,0 +1,80 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#include "precomp.hpp"
#include <opencv2/gapi/streaming/desync.hpp>
#include <opencv2/gapi/streaming/format.hpp>
#include <opencv2/gapi/core.hpp>
// Desynchronizes a GMat data object from the main streaming path.
// Currently implemented as detail::desync() followed by a copy node --
// see the extensive FIXME below for why the copy is required.
cv::GMat cv::gapi::streaming::desync(const cv::GMat &g) {
// FIXME: this is a limited implementation of desync
// The real implementation must be generic (template) and
// reside in desync.hpp (and it is detail::desync<>())
// FIXME: Put a copy here to solve the below problem
// FIXME: Because of the copy, the desync functionality is limited
// to GMat only (we don't have generic copy kernel for other
// object types)
return cv::gapi::copy(detail::desync(g));
// FIXME
//
// If consumed by multiple different islands (OCV and Fluid, for
// example), an object needs to be desynchronized individually
// for every path.
//
// This is a limitation of the current implementation. It works
// this way: every "desync" link from the main path to a new
// desync path gets its "DesyncQueue" object which stores only the
// last value written to the desync object (DO) it consumes
// (the container of type "last written value", or LWV).
//
// LWV
// [Sync path] -> desync() - - > DO -> [ISL0 @ Desync path #1]
//
// At the same time, generally, every island in the streaming
// graph gets its individual input as a queue (so normally, a
// writer pushes the same output MULTIPLE TIMES if it has multiple
// readers):
//
// LWV
// [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1]
// : LWV
// ' - - > DO2 -> [ISL1 @ Desync path #1]
//
// For users, it may seem legit to use desync here only once, and
// it MUST BE legit once the problem is fixed.
// But the problem with the current implementation is that islands
// on the same desync path get different desync queues and in fact
// stay desynchronized between each other. One shouldn't consider
// this as a single desync path anymore.
// If these two ISLs are then merged e.g. with add(a,b), the
// results will be inconsistent, given that the latency of ISL0
// and ISL1 may be different. This is not the same frame anymore
// coming as `a` and `b` to add(a,b) because of it.
//
// To make things clear, we forbid this now and ask to call
// desync one more time to allow that. It is bad since the graph
// structure and island layout depends on kernel packages used,
// not on the sole GComputation structure. This needs to be fixed!
// Here's the working configuration:
//
// LWV
// [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1]
// : LWV
// '-> desync() - - > DO2 -> [ISL1 @ Desync path #2] <-(!)
//
// Putting an operation right after desync() is a quick workaround to
// this synchronization problem. There will be one "last_written_value"
// connected to a desynchronized data object, and this sole last_written_value
// object will feed both branches of the streaming executable.
}
// Streaming accessor: builds a GBGR operation node extracting a BGR GMat
// from a GFrame input.
cv::GMat cv::gapi::streaming::BGR(const cv::GFrame& in) {
return cv::gapi::streaming::GBGR::on(in);
}

View File

@ -52,5 +52,68 @@ GOptFlowLKOutput calcOpticalFlowPyrLK(const cv::GArray<cv::GMat> &prevPyr,
criteria, flags, minEigThresh);
}
// Expression-level wrapper: background-subtraction operation node
// parameterized by BackgroundSubtractorParams.
GMat BackgroundSubtractor(const GMat& src, const BackgroundSubtractorParams& bsp)
{
return GBackgroundSubtractor::on(src, bsp);
}
// Kalman filter with a control input `c`; `have_m` signals whether a
// measurement is available on this iteration.
GMat KalmanFilter(const GMat& m, const cv::GOpaque<bool>& have_m, const GMat& c, const KalmanParams& kp)
{
return GKalmanFilter::on(m, have_m, c, kp);
}
// Kalman filter variant without a control input.
GMat KalmanFilter(const GMat& m, const cv::GOpaque<bool>& have_m, const KalmanParams& kp)
{
return GKalmanFilterNoControl::on(m, have_m, kp);
}
namespace video {
// Validates a KalmanParams bundle against the measurement/control
// descriptors: checks matrix types are consistent (CV_32FC1/CV_64FC1)
// and that every matrix has dimensions compatible with the state
// dimension (dDim) and measurement dimension (mDim). Asserts on any
// violation via GAPI_Assert.
void checkParams(const cv::gapi::KalmanParams& kfParams,
const cv::GMatDesc& measurement, const cv::GMatDesc& control)
{
int type = kfParams.transitionMatrix.type();
GAPI_Assert(type == CV_32FC1 || type == CV_64FC1);
int depth = CV_MAT_DEPTH(type);
// An empty (default) control descriptor means the no-control variant.
bool controlCapable = !(control == GMatDesc{});
if (controlCapable)
{
GAPI_Assert(!kfParams.controlMatrix.empty());
GAPI_Assert(control.depth == depth && control.chan == 1 &&
control.size.height == kfParams.controlMatrix.cols &&
control.size.width == 1);
}
else
GAPI_Assert(kfParams.controlMatrix.empty());
// All matrices must be non-empty and share the transition matrix type.
GAPI_Assert(!kfParams.state.empty() && kfParams.state.type() == type);
GAPI_Assert(!kfParams.errorCov.empty() && kfParams.errorCov.type() == type);
GAPI_Assert(!kfParams.transitionMatrix.empty() && kfParams.transitionMatrix.type() == type);
GAPI_Assert(!kfParams.processNoiseCov.empty() && kfParams.processNoiseCov.type() == type);
GAPI_Assert(!kfParams.measurementNoiseCov.empty() && kfParams.measurementNoiseCov.type() == type);
GAPI_Assert(!kfParams.measurementMatrix.empty() && kfParams.measurementMatrix.type() == type);
GAPI_Assert(measurement.depth == depth && measurement.chan == 1);
// dDim: dimensionality of the state vector.
int dDim = kfParams.transitionMatrix.cols;
GAPI_Assert(kfParams.transitionMatrix.rows == dDim);
GAPI_Assert(kfParams.processNoiseCov.cols == dDim &&
kfParams.processNoiseCov.rows == dDim);
GAPI_Assert(kfParams.errorCov.cols == dDim && kfParams.errorCov.rows == dDim);
GAPI_Assert(kfParams.state.rows == dDim && kfParams.state.cols == 1);
GAPI_Assert(kfParams.measurementMatrix.cols == dDim);
// mDim: dimensionality of the measurement vector.
int mDim = kfParams.measurementMatrix.rows;
GAPI_Assert(kfParams.measurementNoiseCov.cols == mDim &&
kfParams.measurementNoiseCov.rows == mDim);
if (controlCapable)
GAPI_Assert(kfParams.controlMatrix.rows == dDim);
GAPI_Assert(measurement.size.height == mDim &&
measurement.size.width == 1);
}
} // namespace video
} //namespace gapi
} //namespace cv

View File

@ -0,0 +1,42 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#include "precomp.hpp"
#include <opencv2/gapi/media.hpp>
// Private implementation: MediaFrame is a thin pimpl handle over a
// user-supplied IAdapter which owns the actual frame data.
struct cv::MediaFrame::Priv {
std::unique_ptr<IAdapter> adapter;
};
// Default-constructed MediaFrame has no Priv (m is null); presumably it
// must be assigned before use -- TODO confirm with callers.
cv::MediaFrame::MediaFrame() {
}
// Takes ownership of the adapter.
cv::MediaFrame::MediaFrame(AdapterPtr &&ptr)
: m(new Priv{std::move(ptr)}) {
}
// Frame descriptor is delegated to the adapter.
cv::GFrameDesc cv::MediaFrame::desc() const {
return m->adapter->meta();
}
// Data access (R/W) is delegated to the adapter; returns a View whose
// lifetime controls the mapping.
cv::MediaFrame::View cv::MediaFrame::access(Access code) const {
return m->adapter->access(code);
}
// View: plane pointers + strides plus an optional release callback.
cv::MediaFrame::View::View(Ptrs&& ptrs, Strides&& strs, Callback &&cb)
: ptr (std::move(ptrs))
, stride(std::move(strs))
, m_cb (std::move(cb)) {
}
// Fire the release callback (if any) when the View goes out of scope.
cv::MediaFrame::View::~View() {
if (m_cb) {
m_cb();
}
}
// Out-of-line virtual destructor anchors the IAdapter vtable.
cv::MediaFrame::IAdapter::~IAdapter() {
}

View File

@ -2,7 +2,7 @@
#include <opencv2/gapi/render/render.hpp> // Kernel API's
#include "api/render_ocv.hpp"
#include "api/ft_render.hpp"
#include "backends/render/ft_render.hpp"
namespace cv
{
@ -146,12 +146,8 @@ struct EmptyConverter
template <typename ColorConverter>
void drawPrimitivesOCV(cv::Mat& in,
const cv::gapi::wip::draw::Prims& prims,
cv::gapi::wip::draw::FTTextRender* ftpr)
std::shared_ptr<cv::gapi::wip::draw::FTTextRender>& ftpr)
{
#ifndef HAVE_FREETYPE
cv::util::suppress_unused_warning(ftpr);
#endif
using namespace cv::gapi::wip::draw;
ColorConverter converter;
@ -177,7 +173,6 @@ void drawPrimitivesOCV(cv::Mat& in,
case Prim::index_of<FText>():
{
#ifdef HAVE_FREETYPE
const auto& ftp = cv::util::get<FText>(p);
const auto color = converter.cvtColor(ftp.color);
@ -196,9 +191,6 @@ void drawPrimitivesOCV(cv::Mat& in,
cv::Point tl(ftp.org.x, ftp.org.y - mask.size().height + baseline);
blendTextMask(in, mask, tl, color);
#else
cv::util::throw_error(std::runtime_error("FreeType not found !"));
#endif
break;
}
@ -251,16 +243,16 @@ void drawPrimitivesOCV(cv::Mat& in,
}
}
void drawPrimitivesOCVBGR(cv::Mat &in,
const cv::gapi::wip::draw::Prims &prims,
cv::gapi::wip::draw::FTTextRender* ftpr)
void drawPrimitivesOCVBGR(cv::Mat &in,
const cv::gapi::wip::draw::Prims &prims,
std::shared_ptr<cv::gapi::wip::draw::FTTextRender> &ftpr)
{
drawPrimitivesOCV<EmptyConverter>(in, prims, ftpr);
}
void drawPrimitivesOCVYUV(cv::Mat &in,
const cv::gapi::wip::draw::Prims &prims,
cv::gapi::wip::draw::FTTextRender* ftpr)
void drawPrimitivesOCVYUV(cv::Mat &in,
const cv::gapi::wip::draw::Prims &prims,
std::shared_ptr<cv::gapi::wip::draw::FTTextRender> &ftpr)
{
drawPrimitivesOCV<BGR2YUVConverter>(in, prims, ftpr);
}

View File

@ -1,6 +1,6 @@
#include <vector>
#include "render_priv.hpp"
#include "ft_render.hpp"
#include "backends/render/ft_render.hpp"
#ifndef OPENCV_RENDER_OCV_HPP
#define OPENCV_RENDER_OCV_HPP
@ -15,8 +15,8 @@ namespace draw
{
// FIXME only for tests
void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc);
void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc);
void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, std::shared_ptr<cv::gapi::wip::draw::FTTextRender>& mc);
void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, std::shared_ptr<cv::gapi::wip::draw::FTTextRender>& mc);
} // namespace draw
} // namespace wip

View File

@ -0,0 +1,75 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#include <opencv2/gapi/rmat.hpp>
using View = cv::RMat::View;
namespace {
// Validates a View descriptor: multi-dimensional Views are only
// supported with chan == -1. Returns the descriptor unchanged on
// success, throws std::logic_error otherwise.
// FIX: corrected the misspelled user-facing error message
// ("Multidimesional" -> "Multidimensional").
cv::GMatDesc checkDesc(const cv::GMatDesc& desc) {
if (!desc.dims.empty() && desc.chan != -1) {
cv::util::throw_error(
std::logic_error("Multidimensional RMat::Views with chan different from -1 are not supported!"));
}
return desc;
}
// Builds an OpenCV type id (depth + channels) from a GMatDesc.
int typeFromDesc(const cv::GMatDesc& desc) {
// In multidimensional case GMatDesc::chan is -1,
// change it to 1 when calling CV_MAKE_TYPE
return CV_MAKE_TYPE(desc.depth, desc.chan == -1 ? 1 : desc.chan);
}
// Computes dense (tightly-packed, row-major) steps for the descriptor:
// the innermost step is the element size, and each outer step is the
// next-inner step multiplied by the next-inner extent.
static View::stepsT defaultSteps(const cv::GMatDesc& desc) {
const auto& dims = desc.dims.empty()
? std::vector<int>{desc.size.height, desc.size.width}
: desc.dims;
View::stepsT steps(dims.size(), 0u);
auto type = typeFromDesc(desc);
steps.back() = CV_ELEM_SIZE(type);
for (int i = static_cast<int>(dims.size())-2; i >= 0; i--) {
// FIX: the step of dimension i spans all *inner* dimensions:
// steps[i] = steps[i+1] * dims[i+1]. The previous code multiplied
// by dims[i], which yields a wrong pitch for any non-square layout
// (e.g. a 2D HxW buffer with H != W got a row step of
// elemSize*H instead of elemSize*W).
steps[i] = steps[i+1]*dims[i+1];
}
return steps;
}
} // anonymous namespace
// 2D-only constructor: builds default dense steps and overrides the row
// step when a non-zero `step` is supplied.
// NOTE: the immediately-invoked lambda reads m_desc during member
// initialization -- this relies on m_desc being declared before m_steps
// in the class definition (TODO confirm member order in rmat.hpp).
View::View(const cv::GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb)
: m_desc(checkDesc(desc))
, m_data(data)
, m_steps([this, step](){
GAPI_Assert(m_desc.dims.empty());
auto steps = defaultSteps(m_desc);
if (step != 0u) {
steps[0] = step;
}
return steps;
}())
, m_cb(std::move(cb)) {
}
// N-dimensional constructor: an empty `steps` argument means "use the
// dense default layout" computed from the descriptor.
View::View(const cv::GMatDesc& desc, uchar* data, const stepsT &steps, DestroyCallback&& cb)
: m_desc(checkDesc(desc))
, m_data(data)
, m_steps(steps == stepsT{} ? defaultSteps(m_desc): steps)
, m_cb(std::move(cb)) {
}
// OpenCV type id (depth + channels) of the viewed buffer.
int View::type() const { return typeFromDesc(m_desc); }
// There is an issue with default generated operator=(View&&) on Mac:
// it doesn't nullify m_cb of the moved object, so the move assignment
// is written out explicitly and detaches the source completely.
View& View::operator=(View&& v) {
// FIX: guard against self-move -- the destructive reset of `v` below
// would otherwise wipe our own state.
if (this != &v) {
m_desc  = v.m_desc;
m_data  = v.m_data;
m_steps = v.m_steps;
// FIX: move the callback instead of copying it; the explicit
// nullptr below keeps the documented "moved-from View never fires
// the destroy callback" guarantee regardless of what the move
// leaves behind.
m_cb    = std::move(v.m_cb);
v.m_desc  = {};
v.m_data  = nullptr;
v.m_steps = {0u};
v.m_cb    = nullptr;
}
return *this;
}

View File

@ -10,40 +10,47 @@
#include "backends/common/serialization.hpp"
std::vector<char> cv::gapi::serialize(const cv::GComputation &c) {
cv::gimpl::s11n::ByteMemoryOutStream os;
cv::gapi::s11n::ByteMemoryOutStream os;
c.serialize(os);
return os.data();
}
cv::GComputation cv::gapi::detail::getGraph(const std::vector<char> &p) {
cv::gimpl::s11n::ByteMemoryInStream is(p);
cv::gapi::s11n::ByteMemoryInStream is(p);
return cv::GComputation(is);
}
cv::GMetaArgs cv::gapi::detail::getMetaArgs(const std::vector<char> &p) {
cv::gimpl::s11n::ByteMemoryInStream is(p);
cv::gapi::s11n::ByteMemoryInStream is(p);
return meta_args_deserialize(is);
}
cv::GRunArgs cv::gapi::detail::getRunArgs(const std::vector<char> &p) {
cv::gimpl::s11n::ByteMemoryInStream is(p);
cv::gapi::s11n::ByteMemoryInStream is(p);
return run_args_deserialize(is);
}
std::vector<char> cv::gapi::serialize(const cv::GMetaArgs& ma)
{
cv::gimpl::s11n::ByteMemoryOutStream os;
cv::gapi::s11n::ByteMemoryOutStream os;
serialize(os, ma);
return os.data();
}
std::vector<char> cv::gapi::serialize(const cv::GRunArgs& ra)
{
cv::gimpl::s11n::ByteMemoryOutStream os;
cv::gapi::s11n::ByteMemoryOutStream os;
serialize(os, ra);
return os.data();
}
// Serializes compile arguments into a flat byte vector using the
// in-memory output stream (mirrors the GMetaArgs/GRunArgs overloads).
std::vector<char> cv::gapi::serialize(const cv::GCompileArgs& ca)
{
cv::gapi::s11n::ByteMemoryOutStream os;
serialize(os, ca);
return os.data();
}
// FIXME: This function should move from S11N to GRunArg-related entities.
// it has nothing to do with the S11N as it is
cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results)
@ -72,6 +79,9 @@ cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results)
case T::index_of<cv::detail::OpaqueRef>() :
outputs.emplace_back(cv::util::get<cv::detail::OpaqueRef>(res_obj));
break;
case cv::GRunArg::index_of<cv::RMat>() :
outputs.emplace_back((cv::RMat*)(&(cv::util::get<cv::RMat>(res_obj))));
break;
default:
GAPI_Assert(false && "This value type is not supported!"); // ...maybe because of STANDALONE mode.
break;
@ -105,6 +115,9 @@ cv::GRunArg cv::gapi::bind(cv::GRunArgP &out)
case T::index_of<cv::Scalar*>() :
return cv::GRunArg(*cv::util::get<cv::Scalar*>(out));
case T::index_of<cv::RMat*>() :
return cv::GRunArg(*cv::util::get<cv::RMat*>(out));
default:
// ...maybe our types were extended
GAPI_Assert(false && "This value type is UNKNOWN!");

View File

@ -22,14 +22,77 @@
namespace cv {
namespace gimpl {
// Wraps an RMat::View into a non-owning cv::Mat header (no data copy).
// 2D views map to the (rows, cols) Mat constructor; N-dimensional views
// use the dims-based constructor. The standalone build lacks the
// steps-taking nd constructor, hence the fallback without strides.
inline cv::Mat asMat(RMat::View& v) {
#if !defined(GAPI_STANDALONE)
return v.dims().empty() ? cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step())
: cv::Mat(v.dims(), v.type(), v.ptr(), v.steps().data());
#else
// FIXME: add a check that steps are default
return v.dims().empty() ? cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step())
: cv::Mat(v.dims(), v.type(), v.ptr());
#endif
}
// Wraps a cv::Mat into a non-owning RMat::View, copying the per-dim
// steps; the optional callback is invoked when the View is destroyed.
inline RMat::View asView(const Mat& m, RMat::View::DestroyCallback&& cb = nullptr) {
#if !defined(GAPI_STANDALONE)
RMat::View::stepsT steps(m.dims);
for (int i = 0; i < m.dims; i++) {
steps[i] = m.step[i];
}
return RMat::View(cv::descr_of(m), m.data, steps, std::move(cb));
#else
// Standalone own::Mat exposes only a single (row) step.
return RMat::View(cv::descr_of(m), m.data, m.step, std::move(cb));
#endif
}
// Default RMat adapter backed by a cv::Mat: access() returns a
// non-owning View over the Mat's buffer (Mat copy is shallow/refcounted,
// so the adapter shares data with the source).
class RMatAdapter : public RMat::Adapter {
cv::Mat m_mat;
public:
const void* data() const { return m_mat.data; }
RMatAdapter(cv::Mat m) : m_mat(m) {}
virtual RMat::View access(RMat::Access) override { return asView(m_mat); }
virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); }
};
// Forward declarations
struct Data;
struct RcDesc;
// Adapter exposing a BGR MediaFrame as an RMat. access() maps the frame
// and keeps the MediaFrame::View alive inside the RMat::View's destroy
// callback, so the mapping persists for the RMat::View's lifetime.
struct GAPI_EXPORTS RMatMediaAdapterBGR final: public cv::RMat::Adapter
{
explicit RMatMediaAdapterBGR(const cv::MediaFrame& frame) : m_frame(frame) { };
virtual cv::RMat::View access(cv::RMat::Access a) override
{
auto view = m_frame.access(a == cv::RMat::Access::W ? cv::MediaFrame::Access::W
: cv::MediaFrame::Access::R);
auto ptr = reinterpret_cast<uchar*>(view.ptr[0]);
auto stride = view.stride[0];
// Move the frame view into a shared_ptr captured by the callback:
// releasing the RMat::View releases the frame mapping.
std::shared_ptr<cv::MediaFrame::View> view_ptr =
std::make_shared<cv::MediaFrame::View>(std::move(view));
auto callback = [view_ptr]() mutable { view_ptr.reset(); };
return cv::RMat::View(desc(), ptr, stride, callback);
}
virtual cv::GMatDesc desc() const override
{
// Only BGR frames are supported by this adapter (8UC3).
const auto& desc = m_frame.desc();
GAPI_Assert(desc.fmt == cv::MediaFormat::BGR);
return cv::GMatDesc{CV_8U, 3, desc.size};
}
cv::MediaFrame m_frame;
};
namespace magazine {
template<typename... Ts> struct Class
{
template<typename T> using MapT = std::unordered_map<int, T>;
using MapM = std::unordered_map<int, GRunArg::Meta>;
template<typename T> MapT<T>& slot()
{
return std::get<ade::util::type_list_index<T, Ts...>::value>(slots);
@ -38,26 +101,61 @@ namespace magazine {
{
return std::get<ade::util::type_list_index<T, Ts...>::value>(slots);
}
template<typename T> MapM& meta()
{
return metas[ade::util::type_list_index<T, Ts...>::value];
}
template<typename T> const MapM& meta() const
{
return metas[ade::util::type_list_index<T, Ts...>::value];
}
private:
std::tuple<MapT<Ts>...> slots;
std::array<MapM, sizeof...(Ts)> metas;
};
} // namespace magazine
using Mag = magazine::Class< cv::Mat
, cv::Scalar
, cv::detail::VectorRef
, cv::detail::OpaqueRef
, cv::RMat
, cv::RMat::View
, cv::MediaFrame
#if !defined(GAPI_STANDALONE)
using Mag = magazine::Class<cv::Mat, cv::UMat, cv::Scalar, cv::detail::VectorRef, cv::detail::OpaqueRef>;
#else
using Mag = magazine::Class<cv::Mat, cv::Scalar, cv::detail::VectorRef, cv::detail::OpaqueRef>;
, cv::UMat
#endif
>;
namespace magazine
{
void GAPI_EXPORTS bindInArg (Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat = false);
void GAPI_EXPORTS bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, bool is_umat = false);
enum class HandleRMat { BIND, SKIP };
// Extracts a memory object from GRunArg, stores it in appropriate slot in a magazine
// Note:
// Only RMats are expected here as a memory object for GMat shape.
// If handleRMat is BIND, RMat will be accessed, and RMat::View and wrapping cv::Mat
// will be placed into the magazine.
// If handleRMat is SKIP, this function skips RMat handling, assuming that the backend will do it on its own.
// FIXME?
// handleRMat parameter might be redundant if all device specific backends implement own bind routines
// without utilizing magazine at all
void GAPI_EXPORTS bindInArg (Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handleRMat = HandleRMat::BIND);
// Extracts a memory object reference from GRunArgP, stores it in the appropriate slot in a magazine
// Note on RMat handling from bindInArg above is also applied here
void GAPI_EXPORTS bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, HandleRMat handleRMat = HandleRMat::BIND);
void resetInternalData(Mag& mag, const Data &d);
cv::GRunArg getArg (const Mag& mag, const RcDesc &ref);
cv::GRunArgP getObjPtr ( Mag& mag, const RcDesc &rc, bool is_umat = false);
void writeBack (const Mag& mag, const RcDesc &rc, GRunArgP &g_arg, bool is_umat = false);
void writeBack (const Mag& mag, const RcDesc &rc, GRunArgP &g_arg);
// A mandatory clean-up procedure to force proper lifetime of wrappers (cv::Mat, cv::RMat::View)
// over not-owned data
// FIXME? Add an RAII wrapper for that?
// Or put objects which need to be cleaned-up into a separate stack allocated magazine?
void unbind(Mag &mag, const RcDesc &rc);
} // namespace magazine
namespace detail
@ -90,7 +188,7 @@ inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
return cv::gapi::getCompileArg<T>(args);
}
void createMat(const cv::GMatDesc& desc, cv::Mat& mat);
void GAPI_EXPORTS createMat(const cv::GMatDesc& desc, cv::Mat& mat);
}} // cv::gimpl

View File

@ -0,0 +1,105 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#include "precomp.hpp"
#include <opencv2/gapi/gcommon.hpp> // compile args
#include <opencv2/gapi/util/any.hpp> // any
#include <opencv2/gapi/streaming/meta.hpp> // GMeta
#include "compiler/gobjref.hpp" // RcDesc
#include "compiler/gmodel.hpp" // GModel, Op
#include "backends/common/gbackend.hpp"
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
#include "backends/common/gmetabackend.hpp"
namespace {
// Island executable for the GMeta operation: instead of computing
// anything, it forwards a tagged entry of the input's run-time meta map
// to the output (see run() below).
class GraphMetaExecutable final: public cv::gimpl::GIslandExecutable {
std::string m_meta_tag;
public:
GraphMetaExecutable(const ade::Graph& g,
const std::vector<ade::NodeHandle>& nodes);
bool canReshape() const override;
void reshape(ade::Graph&, const cv::GCompileArgs&) override;
void run(std::vector<InObj> &&input_objs,
std::vector<OutObj> &&output_objs) override;
};
// Meta extraction is shape-agnostic, so reshape is trivially supported.
bool GraphMetaExecutable::canReshape() const {
return true;
}
void GraphMetaExecutable::reshape(ade::Graph&, const cv::GCompileArgs&) {
// do nothing here
}
// Captures the meta tag from the single GMeta operation in this island.
GraphMetaExecutable::GraphMetaExecutable(const ade::Graph& g,
const std::vector<ade::NodeHandle>& nodes) {
// There may be only one node in the graph
GAPI_Assert(nodes.size() == 1u);
cv::gimpl::GModel::ConstGraph cg(g);
const auto &op = cg.metadata(nodes[0]).get<cv::gimpl::Op>();
GAPI_Assert(op.k.name == cv::gapi::streaming::detail::GMeta::id());
m_meta_tag = op.k.tag;
}
// Looks up m_meta_tag in the sole input's run-time meta map and writes
// the found value into the sole output (an OpaqueRef). Throws if the
// tag is absent on the input object.
void GraphMetaExecutable::run(std::vector<InObj> &&input_objs,
std::vector<OutObj> &&output_objs) {
GAPI_Assert(input_objs.size() == 1u);
GAPI_Assert(output_objs.size() == 1u);
// FIX: bind by reference -- the original copied the whole GRunArg
// (and GRunArgP) by value just to read the meta map / assign through
// the output, which is a needless per-frame copy on the hot path.
const cv::GRunArg &in_arg = input_objs[0].second;
cv::GRunArgP &out_arg = output_objs[0].second;
auto it = in_arg.meta.find(m_meta_tag);
if (it == in_arg.meta.end()) {
cv::util::throw_error
(std::logic_error("Run-time meta "
+ m_meta_tag
+ " is not found in object "
+ std::to_string(static_cast<int>(input_objs[0].first.shape))
+ "/"
+ std::to_string(input_objs[0].first.id)));
}
cv::util::get<cv::detail::OpaqueRef>(out_arg) = it->second;
}
// Minimal backend: it has no kernels to unpack; compile() simply wraps
// the island's nodes into a GraphMetaExecutable.
class GraphMetaBackendImpl final: public cv::gapi::GBackend::Priv {
virtual void unpackKernel(ade::Graph &,
const ade::NodeHandle &,
const cv::GKernelImpl &) override {
// Do nothing here
}
virtual EPtr compile(const ade::Graph& graph,
const cv::GCompileArgs&,
const std::vector<ade::NodeHandle>& nodes,
const std::vector<cv::gimpl::Data>&,
const std::vector<cv::gimpl::Data>&) const override {
return EPtr{new GraphMetaExecutable(graph, nodes)};
}
};
// Process-wide singleton instance of the meta backend (function-local
// static, initialized thread-safely on first use).
cv::gapi::GBackend graph_meta_backend() {
static cv::gapi::GBackend this_backend(std::make_shared<GraphMetaBackendImpl>());
return this_backend;
}
// Kernel tag binding the GMeta API to the meta backend.
// kernel() returns a dummy value -- this backend ignores the kernel
// payload in unpackKernel().
struct InGraphMetaKernel final: public cv::detail::KernelTag {
using API = cv::gapi::streaming::detail::GMeta;
static cv::gapi::GBackend backend() { return graph_meta_backend(); }
static int kernel() { return 42; }
};
} // anonymous namespace
// Public entry point: the kernel package containing the sole
// graph-meta kernel.
cv::gapi::GKernelPackage cv::gimpl::meta::kernels() {
return cv::gapi::kernels<InGraphMetaKernel>();
}

View File

@ -0,0 +1,16 @@
#ifndef OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP
#define OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP
#include <opencv2/gapi/gkernel.hpp>
namespace cv {
namespace gimpl {
namespace meta {
cv::gapi::GKernelPackage kernels();
} // namespace meta
} // namespace gimpl
} // namespace cv
#endif // OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP

View File

@ -21,11 +21,11 @@
#include "backends/common/serialization.hpp"
namespace cv {
namespace gimpl {
namespace gapi {
namespace s11n {
namespace {
void putData(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle &nh) {
void putData(GSerialized& s, const cv::gimpl::GModel::ConstGraph& cg, const ade::NodeHandle &nh) {
const auto gdata = cg.metadata(nh).get<gimpl::Data>();
const auto it = ade::util::find_if(s.m_datas, [&gdata](const cv::gimpl::Data &cd) {
return cd.rc == gdata.rc && cd.shape == gdata.shape;
@ -35,7 +35,7 @@ void putData(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle
}
}
void putOp(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle &nh) {
void putOp(GSerialized& s, const cv::gimpl::GModel::ConstGraph& cg, const ade::NodeHandle &nh) {
const auto& op = cg.metadata(nh).get<gimpl::Op>();
for (const auto &in_nh : nh->inNodes()) { putData(s, cg, in_nh); }
for (const auto &out_nh : nh->outNodes()) { putData(s, cg, out_nh); }
@ -43,25 +43,25 @@ void putOp(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle &
}
void mkDataNode(ade::Graph& g, const cv::gimpl::Data& data) {
GModel::Graph gm(g);
cv::gimpl::GModel::Graph gm(g);
auto nh = gm.createNode();
gm.metadata(nh).set(NodeType{NodeType::DATA});
gm.metadata(nh).set(cv::gimpl::NodeType{cv::gimpl::NodeType::DATA});
gm.metadata(nh).set(data);
}
void mkOpNode(ade::Graph& g, const cv::gimpl::Op& op) {
GModel::Graph gm(g);
cv::gimpl::GModel::Graph gm(g);
auto nh = gm.createNode();
gm.metadata(nh).set(NodeType{NodeType::OP});
gm.metadata(nh).set(cv::gimpl::NodeType{cv::gimpl::NodeType::OP});
gm.metadata(nh).set(op);
}
void linkNodes(ade::Graph& g) {
std::map<cv::gimpl::RcDesc, ade::NodeHandle> dataNodes;
GModel::Graph gm(g);
cv::gimpl::GModel::Graph gm(g);
for (const auto& nh : g.nodes()) {
if (gm.metadata(nh).get<NodeType>().t == NodeType::DATA) {
if (gm.metadata(nh).get<cv::gimpl::NodeType>().t == cv::gimpl::NodeType::DATA) {
const auto &d = gm.metadata(nh).get<gimpl::Data>();
const auto rc = cv::gimpl::RcDesc{d.rc, d.shape, d.ctor};
dataNodes[rc] = nh;
@ -69,7 +69,7 @@ void linkNodes(ade::Graph& g) {
}
for (const auto& nh : g.nodes()) {
if (gm.metadata(nh).get<NodeType>().t == NodeType::OP) {
if (gm.metadata(nh).get<cv::gimpl::NodeType>().t == cv::gimpl::NodeType::OP) {
const auto& op = gm.metadata(nh).get<gimpl::Op>();
for (const auto& in : ade::util::indexed(op.args)) {
const auto& arg = ade::util::value(in);
@ -78,7 +78,7 @@ void linkNodes(ade::Graph& g) {
const auto rc = arg.get<gimpl::RcDesc>();
const auto& in_nh = dataNodes.at(rc);
const auto& in_eh = g.link(in_nh, nh);
gm.metadata(in_eh).set(Input{idx});
gm.metadata(in_eh).set(cv::gimpl::Input{idx});
}
}
@ -87,19 +87,20 @@ void linkNodes(ade::Graph& g) {
const auto rc = ade::util::value(out);
const auto& out_nh = dataNodes.at(rc);
const auto& out_eh = g.link(nh, out_nh);
gm.metadata(out_eh).set(Output{idx});
gm.metadata(out_eh).set(cv::gimpl::Output{idx});
}
}
}
}
void relinkProto(ade::Graph& g) {
using namespace cv::gimpl;
// identify which node handles map to the protocol
// input/output object in the reconstructed graph
using S = std::set<cv::gimpl::RcDesc>; // FIXME: use ...
using M = std::map<cv::gimpl::RcDesc, ade::NodeHandle>; // FIXME: unordered!
using S = std::set<RcDesc>; // FIXME: use ...
using M = std::map<RcDesc, ade::NodeHandle>; // FIXME: unordered!
cv::gimpl::GModel::Graph gm(g);
GModel::Graph gm(g);
auto &proto = gm.metadata().get<Protocol>();
const S set_in(proto.inputs.begin(), proto.inputs.end());
@ -108,9 +109,9 @@ void relinkProto(ade::Graph& g) {
// Associate the protocol node handles with their resource identifiers
for (auto &&nh : gm.nodes()) {
if (gm.metadata(nh).get<cv::gimpl::NodeType>().t == cv::gimpl::NodeType::DATA) {
const auto &d = gm.metadata(nh).get<cv::gimpl::Data>();
const auto rc = cv::gimpl::RcDesc{d.rc, d.shape, d.ctor};
if (gm.metadata(nh).get<NodeType>().t == NodeType::DATA) {
const auto &d = gm.metadata(nh).get<Data>();
const auto rc = RcDesc{d.rc, d.shape, d.ctor};
if (set_in.count(rc) > 0) {
GAPI_DbgAssert(set_out.count(rc) == 0);
map_in[rc] = nh;
@ -128,6 +129,12 @@ void relinkProto(ade::Graph& g) {
proto.out_nhs.clear();
for (auto &rc : proto.inputs) { proto.in_nhs .push_back(map_in .at(rc)); }
for (auto &rc : proto.outputs) { proto.out_nhs.push_back(map_out.at(rc)); }
// If a subgraph is being serialized it's possible that
// some of its in/out nodes are INTERNAL in the full graph.
// Set their storage apporpriately
for (auto &nh : proto.in_nhs) { gm.metadata(nh).get<Data>().storage = Data::Storage::INPUT; }
for (auto &nh : proto.out_nhs) { gm.metadata(nh).get<Data>().storage = Data::Storage::OUTPUT; }
}
} // anonymous namespace
@ -138,76 +145,102 @@ void relinkProto(ade::Graph& g) {
// OpenCV types ////////////////////////////////////////////////////////////////
I::OStream& operator<< (I::OStream& os, const cv::Point &pt) {
IOStream& operator<< (IOStream& os, const cv::Point &pt) {
return os << pt.x << pt.y;
}
I::IStream& operator>> (I::IStream& is, cv::Point& pt) {
IIStream& operator>> (IIStream& is, cv::Point& pt) {
return is >> pt.x >> pt.y;
}
I::OStream& operator<< (I::OStream& os, const cv::Size &sz) {
IOStream& operator<< (IOStream& os, const cv::Point2f &pt) {
return os << pt.x << pt.y;
}
IIStream& operator>> (IIStream& is, cv::Point2f& pt) {
return is >> pt.x >> pt.y;
}
IOStream& operator<< (IOStream& os, const cv::Size &sz) {
return os << sz.width << sz.height;
}
I::IStream& operator>> (I::IStream& is, cv::Size& sz) {
IIStream& operator>> (IIStream& is, cv::Size& sz) {
return is >> sz.width >> sz.height;
}
I::OStream& operator<< (I::OStream& os, const cv::Rect &rc) {
IOStream& operator<< (IOStream& os, const cv::Rect &rc) {
return os << rc.x << rc.y << rc.width << rc.height;
}
I::IStream& operator>> (I::IStream& is, cv::Rect& rc) {
IIStream& operator>> (IIStream& is, cv::Rect& rc) {
return is >> rc.x >> rc.y >> rc.width >> rc.height;
}
I::OStream& operator<< (I::OStream& os, const cv::Scalar &s) {
IOStream& operator<< (IOStream& os, const cv::Scalar &s) {
return os << s.val[0] << s.val[1] << s.val[2] << s.val[3];
}
I::IStream& operator>> (I::IStream& is, cv::Scalar& s) {
IIStream& operator>> (IIStream& is, cv::Scalar& s) {
return is >> s.val[0] >> s.val[1] >> s.val[2] >> s.val[3];
}
IOStream& operator<< (IOStream& os, const cv::RMat& mat) {
mat.serialize(os);
return os;
}
IIStream& operator>> (IIStream& is, cv::RMat&) {
util::throw_error(std::logic_error("operator>> for RMat should never be called"));
return is;
}
IOStream& operator<< (IOStream& os, const cv::MediaFrame &) {
// Stub
GAPI_Assert(false && "cv::MediaFrame serialization is not supported!");
return os;
}
IIStream& operator>> (IIStream& is, cv::MediaFrame &) {
// Stub
GAPI_Assert(false && "cv::MediaFrame serialization is not supported!");
return is;
}
namespace
{
#if !defined(GAPI_STANDALONE)
template<typename T>
void write_plain(I::OStream &os, const T *arr, std::size_t sz) {
void write_plain(IOStream &os, const T *arr, std::size_t sz) {
for (auto &&it : ade::util::iota(sz)) os << arr[it];
}
template<typename T>
void read_plain(I::IStream &is, T *arr, std::size_t sz) {
void read_plain(IIStream &is, T *arr, std::size_t sz) {
for (auto &&it : ade::util::iota(sz)) is >> arr[it];
}
template<typename T>
void write_mat_data(I::OStream &os, const cv::Mat &m) {
void write_mat_data(IOStream &os, const cv::Mat &m) {
// Write every row individually (handles the case when Mat is a view)
for (auto &&r : ade::util::iota(m.rows)) {
write_plain(os, m.ptr<T>(r), m.cols*m.channels());
}
}
template<typename T>
void read_mat_data(I::IStream &is, cv::Mat &m) {
void read_mat_data(IIStream &is, cv::Mat &m) {
// Write every row individually (handles the case when Mat is aligned)
for (auto &&r : ade::util::iota(m.rows)) {
read_plain(is, m.ptr<T>(r), m.cols*m.channels());
}
}
#else
void write_plain(I::OStream &os, const uchar *arr, std::size_t sz) {
void write_plain(IOStream &os, const uchar *arr, std::size_t sz) {
for (auto &&it : ade::util::iota(sz)) os << arr[it];
}
void read_plain(I::IStream &is, uchar *arr, std::size_t sz) {
void read_plain(IIStream &is, uchar *arr, std::size_t sz) {
for (auto &&it : ade::util::iota(sz)) is >> arr[it];
}
template<typename T>
void write_mat_data(I::OStream &os, const cv::Mat &m) {
void write_mat_data(IOStream &os, const cv::Mat &m) {
// Write every row individually (handles the case when Mat is a view)
for (auto &&r : ade::util::iota(m.rows)) {
write_plain(os, m.ptr(r), m.cols*m.channels()*sizeof(T));
}
}
template<typename T>
void read_mat_data(I::IStream &is, cv::Mat &m) {
void read_mat_data(IIStream &is, cv::Mat &m) {
// Write every row individually (handles the case when Mat is aligned)
for (auto &&r : ade::util::iota(m.rows)) {
read_plain(is, m.ptr(r), m.cols*m.channels()*sizeof(T));
@ -216,7 +249,7 @@ void read_mat_data(I::IStream &is, cv::Mat &m) {
#endif
} // namespace
I::OStream& operator<< (I::OStream& os, const cv::Mat &m) {
IOStream& operator<< (IOStream& os, const cv::Mat &m) {
#if !defined(GAPI_STANDALONE)
GAPI_Assert(m.size.dims() == 2 && "Only 2D images are supported now");
#else
@ -235,7 +268,7 @@ I::OStream& operator<< (I::OStream& os, const cv::Mat &m) {
}
return os;
}
I::IStream& operator>> (I::IStream& is, cv::Mat& m) {
IIStream& operator>> (IIStream& is, cv::Mat& m) {
int rows = -1, cols = -1, type = 0;
is >> rows >> cols >> type;
m.create(cv::Size(cols, rows), type);
@ -252,97 +285,109 @@ I::IStream& operator>> (I::IStream& is, cv::Mat& m) {
return is;
}
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Text &t) {
IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Text &t) {
return os << t.bottom_left_origin << t.color << t.ff << t.fs << t.lt << t.org << t.text << t.thick;
}
I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Text &t) {
IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Text &t) {
return is >> t.bottom_left_origin >> t.color >> t.ff >> t.fs >> t.lt >> t.org >> t.text >> t.thick;
}
I::OStream& operator<< (I::OStream&, const cv::gapi::wip::draw::FText &) {
IOStream& operator<< (IOStream&, const cv::gapi::wip::draw::FText &) {
GAPI_Assert(false && "Serialization: Unsupported << for FText");
}
I::IStream& operator>> (I::IStream&, cv::gapi::wip::draw::FText &) {
IIStream& operator>> (IIStream&, cv::gapi::wip::draw::FText &) {
GAPI_Assert(false && "Serialization: Unsupported >> for FText");
}
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Circle &c) {
IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Circle &c) {
return os << c.center << c.color << c.lt << c.radius << c.shift << c.thick;
}
I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Circle &c) {
IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Circle &c) {
return is >> c.center >> c.color >> c.lt >> c.radius >> c.shift >> c.thick;
}
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Rect &r) {
IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Rect &r) {
return os << r.color << r.lt << r.rect << r.shift << r.thick;
}
I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Rect &r) {
IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Rect &r) {
return is >> r.color >> r.lt >> r.rect >> r.shift >> r.thick;
}
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Image &i) {
IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Image &i) {
return os << i.org << i.alpha << i.img;
}
I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Image &i) {
IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Image &i) {
return is >> i.org >> i.alpha >> i.img;
}
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Mosaic &m) {
IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Mosaic &m) {
return os << m.cellSz << m.decim << m.mos;
}
I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Mosaic &m) {
IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Mosaic &m) {
return is >> m.cellSz >> m.decim >> m.mos;
}
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Poly &p) {
IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Poly &p) {
return os << p.color << p.lt << p.points << p.shift << p.thick;
}
I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Poly &p) {
IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Poly &p) {
return is >> p.color >> p.lt >> p.points >> p.shift >> p.thick;
}
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Line &l) {
IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Line &l) {
return os << l.color << l.lt << l.pt1 << l.pt2 << l.shift << l.thick;
}
I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Line &l) {
IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Line &l) {
return is >> l.color >> l.lt >> l.pt1 >> l.pt2 >> l.shift >> l.thick;
}
// G-API types /////////////////////////////////////////////////////////////////
IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg)
{
ByteMemoryOutStream tmpS;
arg.serialize(tmpS);
std::vector<char> data = tmpS.data();
os << arg.tag;
os << data;
return os;
}
// Stubs (empty types)
I::OStream& operator<< (I::OStream& os, cv::util::monostate ) {return os;}
I::IStream& operator>> (I::IStream& is, cv::util::monostate &) {return is;}
IOStream& operator<< (IOStream& os, cv::util::monostate ) {return os;}
IIStream& operator>> (IIStream& is, cv::util::monostate &) {return is;}
I::OStream& operator<< (I::OStream& os, const cv::GScalarDesc &) {return os;}
I::IStream& operator>> (I::IStream& is, cv::GScalarDesc &) {return is;}
IOStream& operator<< (IOStream& os, const cv::GScalarDesc &) {return os;}
IIStream& operator>> (IIStream& is, cv::GScalarDesc &) {return is;}
I::OStream& operator<< (I::OStream& os, const cv::GOpaqueDesc &) {return os;}
I::IStream& operator>> (I::IStream& is, cv::GOpaqueDesc &) {return is;}
IOStream& operator<< (IOStream& os, const cv::GOpaqueDesc &) {return os;}
IIStream& operator>> (IIStream& is, cv::GOpaqueDesc &) {return is;}
I::OStream& operator<< (I::OStream& os, const cv::GArrayDesc &) {return os;}
I::IStream& operator>> (I::IStream& is, cv::GArrayDesc &) {return is;}
IOStream& operator<< (IOStream& os, const cv::GArrayDesc &) {return os;}
IIStream& operator>> (IIStream& is, cv::GArrayDesc &) {return is;}
#if !defined(GAPI_STANDALONE)
I::OStream& operator<< (I::OStream& os, const cv::UMat &)
IOStream& operator<< (IOStream& os, const cv::UMat &)
{
GAPI_Assert(false && "Serialization: Unsupported << for UMat");
return os;
}
I::IStream& operator >> (I::IStream& is, cv::UMat &)
IIStream& operator >> (IIStream& is, cv::UMat &)
{
GAPI_Assert(false && "Serialization: Unsupported >> for UMat");
return is;
}
#endif // !defined(GAPI_STANDALONE)
I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::IStreamSource::Ptr &)
IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &)
{
GAPI_Assert(false && "Serialization: Unsupported << for IStreamSource::Ptr");
return os;
}
I::IStream& operator >> (I::IStream& is, cv::gapi::wip::IStreamSource::Ptr &)
IIStream& operator >> (IIStream& is, cv::gapi::wip::IStreamSource::Ptr &)
{
GAPI_Assert("Serialization: Unsupported >> for IStreamSource::Ptr");
return is;
@ -356,7 +401,7 @@ struct putToStream;
template<typename Ref>
struct putToStream<Ref, std::tuple<>>
{
static void put(I::OStream&, const Ref &)
static void put(IOStream&, const Ref &)
{
GAPI_Assert(false && "Unsupported type for GArray/GOpaque serialization");
}
@ -365,7 +410,7 @@ struct putToStream<Ref, std::tuple<>>
template<typename Ref, typename T, typename... Ts>
struct putToStream<Ref, std::tuple<T, Ts...>>
{
static void put(I::OStream& os, const Ref &r)
static void put(IOStream& os, const Ref &r)
{
if (r.getKind() == cv::detail::GOpaqueTraits<T>::kind) {
os << r.template rref<T>();
@ -381,7 +426,7 @@ struct getFromStream;
template<typename Ref>
struct getFromStream<Ref, std::tuple<>>
{
static void get(I::IStream&, Ref &, cv::detail::OpaqueKind)
static void get(IIStream&, Ref &, cv::detail::OpaqueKind)
{
GAPI_Assert(false && "Unsupported type for GArray/GOpaque deserialization");
}
@ -390,7 +435,7 @@ struct getFromStream<Ref, std::tuple<>>
template<typename Ref, typename T, typename... Ts>
struct getFromStream<Ref, std::tuple<T, Ts...>>
{
static void get(I::IStream& is, Ref &r, cv::detail::OpaqueKind kind) {
static void get(IIStream& is, Ref &r, cv::detail::OpaqueKind kind) {
if (kind == cv::detail::GOpaqueTraits<T>::kind) {
r.template reset<T>();
auto& val = r.template wref<T>();
@ -402,13 +447,13 @@ struct getFromStream<Ref, std::tuple<T, Ts...>>
};
}
I::OStream& operator<< (I::OStream& os, const cv::detail::VectorRef& ref)
IOStream& operator<< (IOStream& os, const cv::detail::VectorRef& ref)
{
os << ref.getKind();
putToStream<cv::detail::VectorRef, cv::detail::GOpaqueTraitsArrayTypes>::put(os, ref);
return os;
}
I::IStream& operator >> (I::IStream& is, cv::detail::VectorRef& ref)
IIStream& operator >> (IIStream& is, cv::detail::VectorRef& ref)
{
cv::detail::OpaqueKind kind;
is >> kind;
@ -416,13 +461,13 @@ I::IStream& operator >> (I::IStream& is, cv::detail::VectorRef& ref)
return is;
}
I::OStream& operator<< (I::OStream& os, const cv::detail::OpaqueRef& ref)
IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef& ref)
{
os << ref.getKind();
putToStream<cv::detail::OpaqueRef, cv::detail::GOpaqueTraitsOpaqueTypes>::put(os, ref);
return os;
}
I::IStream& operator >> (I::IStream& is, cv::detail::OpaqueRef& ref)
IIStream& operator >> (IIStream& is, cv::detail::OpaqueRef& ref)
{
cv::detail::OpaqueKind kind;
is >> kind;
@ -432,41 +477,41 @@ I::IStream& operator >> (I::IStream& is, cv::detail::OpaqueRef& ref)
// Enums and structures
namespace {
template<typename E> I::OStream& put_enum(I::OStream& os, E e) {
template<typename E> IOStream& put_enum(IOStream& os, E e) {
return os << static_cast<int>(e);
}
template<typename E> I::IStream& get_enum(I::IStream& is, E &e) {
template<typename E> IIStream& get_enum(IIStream& is, E &e) {
int x{}; is >> x; e = static_cast<E>(x);
return is;
}
} // anonymous namespace
I::OStream& operator<< (I::OStream& os, cv::GShape sh) {
IOStream& operator<< (IOStream& os, cv::GShape sh) {
return put_enum(os, sh);
}
I::IStream& operator>> (I::IStream& is, cv::GShape &sh) {
IIStream& operator>> (IIStream& is, cv::GShape &sh) {
return get_enum<cv::GShape>(is, sh);
}
I::OStream& operator<< (I::OStream& os, cv::detail::ArgKind k) {
IOStream& operator<< (IOStream& os, cv::detail::ArgKind k) {
return put_enum(os, k);
}
I::IStream& operator>> (I::IStream& is, cv::detail::ArgKind &k) {
IIStream& operator>> (IIStream& is, cv::detail::ArgKind &k) {
return get_enum<cv::detail::ArgKind>(is, k);
}
I::OStream& operator<< (I::OStream& os, cv::detail::OpaqueKind k) {
IOStream& operator<< (IOStream& os, cv::detail::OpaqueKind k) {
return put_enum(os, k);
}
I::IStream& operator>> (I::IStream& is, cv::detail::OpaqueKind &k) {
IIStream& operator>> (IIStream& is, cv::detail::OpaqueKind &k) {
return get_enum<cv::detail::OpaqueKind>(is, k);
}
I::OStream& operator<< (I::OStream& os, cv::gimpl::Data::Storage s) {
IOStream& operator<< (IOStream& os, cv::gimpl::Data::Storage s) {
return put_enum(os, s);
}
I::IStream& operator>> (I::IStream& is, cv::gimpl::Data::Storage &s) {
IIStream& operator>> (IIStream& is, cv::gimpl::Data::Storage &s) {
return get_enum<cv::gimpl::Data::Storage>(is, s);
}
I::OStream& operator<< (I::OStream& os, const cv::GArg &arg) {
IOStream& operator<< (IOStream& os, const cv::GArg &arg) {
// Only GOBJREF and OPAQUE_VAL kinds can be serialized/deserialized
GAPI_Assert( arg.kind == cv::detail::ArgKind::OPAQUE_VAL
|| arg.kind == cv::detail::ArgKind::GOBJREF);
@ -478,21 +523,24 @@ I::OStream& operator<< (I::OStream& os, const cv::GArg &arg) {
GAPI_Assert(arg.kind == cv::detail::ArgKind::OPAQUE_VAL);
GAPI_Assert(arg.opaque_kind != cv::detail::OpaqueKind::CV_UNKNOWN);
switch (arg.opaque_kind) {
case cv::detail::OpaqueKind::CV_BOOL: os << arg.get<bool>(); break;
case cv::detail::OpaqueKind::CV_INT: os << arg.get<int>(); break;
case cv::detail::OpaqueKind::CV_DOUBLE: os << arg.get<double>(); break;
case cv::detail::OpaqueKind::CV_POINT: os << arg.get<cv::Point>(); break;
case cv::detail::OpaqueKind::CV_SIZE: os << arg.get<cv::Size>(); break;
case cv::detail::OpaqueKind::CV_RECT: os << arg.get<cv::Rect>(); break;
case cv::detail::OpaqueKind::CV_SCALAR: os << arg.get<cv::Scalar>(); break;
case cv::detail::OpaqueKind::CV_MAT: os << arg.get<cv::Mat>(); break;
case cv::detail::OpaqueKind::CV_BOOL: os << arg.get<bool>(); break;
case cv::detail::OpaqueKind::CV_INT: os << arg.get<int>(); break;
case cv::detail::OpaqueKind::CV_UINT64: os << arg.get<uint64_t>(); break;
case cv::detail::OpaqueKind::CV_DOUBLE: os << arg.get<double>(); break;
case cv::detail::OpaqueKind::CV_FLOAT: os << arg.get<float>(); break;
case cv::detail::OpaqueKind::CV_STRING: os << arg.get<std::string>(); break;
case cv::detail::OpaqueKind::CV_POINT: os << arg.get<cv::Point>(); break;
case cv::detail::OpaqueKind::CV_SIZE: os << arg.get<cv::Size>(); break;
case cv::detail::OpaqueKind::CV_RECT: os << arg.get<cv::Rect>(); break;
case cv::detail::OpaqueKind::CV_SCALAR: os << arg.get<cv::Scalar>(); break;
case cv::detail::OpaqueKind::CV_MAT: os << arg.get<cv::Mat>(); break;
default: GAPI_Assert(false && "GArg: Unsupported (unknown?) opaque value type");
}
}
return os;
}
I::IStream& operator>> (I::IStream& is, cv::GArg &arg) {
IIStream& operator>> (IIStream& is, cv::GArg &arg) {
is >> arg.kind >> arg.opaque_kind;
// Only GOBJREF and OPAQUE_VAL kinds can be serialized/deserialized
@ -509,14 +557,18 @@ I::IStream& operator>> (I::IStream& is, cv::GArg &arg) {
switch (arg.opaque_kind) {
#define HANDLE_CASE(E,T) case cv::detail::OpaqueKind::CV_##E: \
{ T t{}; is >> t; arg = (cv::GArg(t)); } break
HANDLE_CASE(BOOL , bool);
HANDLE_CASE(INT , int);
HANDLE_CASE(DOUBLE , double);
HANDLE_CASE(POINT , cv::Point);
HANDLE_CASE(SIZE , cv::Size);
HANDLE_CASE(RECT , cv::Rect);
HANDLE_CASE(SCALAR , cv::Scalar);
HANDLE_CASE(MAT , cv::Mat);
HANDLE_CASE(BOOL , bool);
HANDLE_CASE(INT , int);
HANDLE_CASE(UINT64 , uint64_t);
HANDLE_CASE(DOUBLE , double);
HANDLE_CASE(FLOAT , float);
HANDLE_CASE(STRING , std::string);
HANDLE_CASE(POINT , cv::Point);
HANDLE_CASE(POINT2F , cv::Point2f);
HANDLE_CASE(SIZE , cv::Size);
HANDLE_CASE(RECT , cv::Rect);
HANDLE_CASE(SCALAR , cv::Scalar);
HANDLE_CASE(MAT , cv::Mat);
#undef HANDLE_CASE
default: GAPI_Assert(false && "GArg: Unsupported (unknown?) opaque value type");
}
@ -524,43 +576,49 @@ I::IStream& operator>> (I::IStream& is, cv::GArg &arg) {
return is;
}
I::OStream& operator<< (I::OStream& os, const cv::GKernel &k) {
IOStream& operator<< (IOStream& os, const cv::GKernel &k) {
return os << k.name << k.tag << k.outShapes;
}
I::IStream& operator>> (I::IStream& is, cv::GKernel &k) {
IIStream& operator>> (IIStream& is, cv::GKernel &k) {
return is >> const_cast<std::string&>(k.name)
>> const_cast<std::string&>(k.tag)
>> const_cast<cv::GShapes&>(k.outShapes);
}
I::OStream& operator<< (I::OStream& os, const cv::GMatDesc &d) {
IOStream& operator<< (IOStream& os, const cv::GMatDesc &d) {
return os << d.depth << d.chan << d.size << d.planar << d.dims;
}
I::IStream& operator>> (I::IStream& is, cv::GMatDesc &d) {
IIStream& operator>> (IIStream& is, cv::GMatDesc &d) {
return is >> d.depth >> d.chan >> d.size >> d.planar >> d.dims;
}
IOStream& operator<< (IOStream& os, const cv::GFrameDesc &d) {
return put_enum(os, d.fmt) << d.size;
}
IIStream& operator>> (IIStream& is, cv::GFrameDesc &d) {
return get_enum(is, d.fmt) >> d.size;
}
I::OStream& operator<< (I::OStream& os, const cv::gimpl::RcDesc &rc) {
IOStream& operator<< (IOStream& os, const cv::gimpl::RcDesc &rc) {
// FIXME: HostCtor is not serialized!
return os << rc.id << rc.shape;
}
I::IStream& operator>> (I::IStream& is, cv::gimpl::RcDesc &rc) {
IIStream& operator>> (IIStream& is, cv::gimpl::RcDesc &rc) {
// FIXME: HostCtor is not deserialized!
return is >> rc.id >> rc.shape;
}
I::OStream& operator<< (I::OStream& os, const cv::gimpl::Op &op) {
IOStream& operator<< (IOStream& os, const cv::gimpl::Op &op) {
return os << op.k << op.args << op.outs;
}
I::IStream& operator>> (I::IStream& is, cv::gimpl::Op &op) {
IIStream& operator>> (IIStream& is, cv::gimpl::Op &op) {
return is >> op.k >> op.args >> op.outs;
}
I::OStream& operator<< (I::OStream& os, const cv::gimpl::Data &d) {
IOStream& operator<< (IOStream& os, const cv::gimpl::Data &d) {
// FIXME: HostCtor is not stored here!!
// FIXME: Storage may be incorrect for subgraph-to-graph process
return os << d.shape << d.rc << d.meta << d.storage << d.kind;
@ -594,7 +652,7 @@ struct initCtor<Ref, std::tuple<T, Ts...>>
};
} // anonymous namespace
I::IStream& operator>> (I::IStream& is, cv::gimpl::Data &d) {
IIStream& operator>> (IIStream& is, cv::gimpl::Data &d) {
// FIXME: HostCtor is not stored here!!
// FIXME: Storage may be incorrect for subgraph-to-graph process
is >> d.shape >> d.rc >> d.meta >> d.storage >> d.kind;
@ -610,42 +668,42 @@ I::IStream& operator>> (I::IStream& is, cv::gimpl::Data &d) {
}
I::OStream& operator<< (I::OStream& os, const cv::gimpl::DataObjectCounter &c) {
IOStream& operator<< (IOStream& os, const cv::gimpl::DataObjectCounter &c) {
return os << c.m_next_data_id;
}
I::IStream& operator>> (I::IStream& is, cv::gimpl::DataObjectCounter &c) {
IIStream& operator>> (IIStream& is, cv::gimpl::DataObjectCounter &c) {
return is >> c.m_next_data_id;
}
I::OStream& operator<< (I::OStream& os, const cv::gimpl::Protocol &p) {
IOStream& operator<< (IOStream& os, const cv::gimpl::Protocol &p) {
// NB: in_nhs/out_nhs are not written!
return os << p.inputs << p.outputs;
}
I::IStream& operator>> (I::IStream& is, cv::gimpl::Protocol &p) {
IIStream& operator>> (IIStream& is, cv::gimpl::Protocol &p) {
// NB: in_nhs/out_nhs are reconstructed at a later phase
return is >> p.inputs >> p.outputs;
}
void serialize( I::OStream& os
void serialize( IOStream& os
, const ade::Graph &g
, const std::vector<ade::NodeHandle> &nodes) {
cv::gimpl::GModel::ConstGraph cg(g);
serialize(os, g, cg.metadata().get<cv::gimpl::Protocol>(), nodes);
}
void serialize( I::OStream& os
void serialize( IOStream& os
, const ade::Graph &g
, const cv::gimpl::Protocol &p
, const std::vector<ade::NodeHandle> &nodes) {
cv::gimpl::GModel::ConstGraph cg(g);
GSerialized s;
for (auto &nh : nodes) {
switch (cg.metadata(nh).get<NodeType>().t)
switch (cg.metadata(nh).get<cv::gimpl::NodeType>().t)
{
case NodeType::OP: putOp (s, cg, nh); break;
case NodeType::DATA: putData(s, cg, nh); break;
case cv::gimpl::NodeType::OP: putOp (s, cg, nh); break;
case cv::gimpl::NodeType::DATA: putData(s, cg, nh); break;
default: util::throw_error(std::logic_error("Unknown NodeType"));
}
}
@ -654,7 +712,7 @@ void serialize( I::OStream& os
os << s.m_ops << s.m_datas << s.m_counter << s.m_proto;
}
GSerialized deserialize(I::IStream &is) {
GSerialized deserialize(IIStream &is) {
GSerialized s;
is >> s.m_ops >> s.m_datas >> s.m_counter >> s.m_proto;
return s;
@ -662,14 +720,14 @@ GSerialized deserialize(I::IStream &is) {
void reconstruct(const GSerialized &s, ade::Graph &g) {
GAPI_Assert(g.nodes().empty());
for (const auto& d : s.m_datas) cv::gimpl::s11n::mkDataNode(g, d);
for (const auto& op : s.m_ops) cv::gimpl::s11n::mkOpNode(g, op);
cv::gimpl::s11n::linkNodes(g);
for (const auto& d : s.m_datas) cv::gapi::s11n::mkDataNode(g, d);
for (const auto& op : s.m_ops) cv::gapi::s11n::mkOpNode(g, op);
cv::gapi::s11n::linkNodes(g);
cv::gimpl::GModel::Graph gm(g);
gm.metadata().set(s.m_counter);
gm.metadata().set(s.m_proto);
cv::gimpl::s11n::relinkProto(g);
cv::gapi::s11n::relinkProto(g);
gm.metadata().set(cv::gimpl::Deserialized{});
}
@ -679,48 +737,54 @@ void reconstruct(const GSerialized &s, ade::Graph &g) {
const std::vector<char>& ByteMemoryOutStream::data() const {
return m_storage;
}
I::OStream& ByteMemoryOutStream::operator<< (uint32_t atom) {
IOStream& ByteMemoryOutStream::operator<< (uint32_t atom) {
m_storage.push_back(0xFF & (atom));
m_storage.push_back(0xFF & (atom >> 8));
m_storage.push_back(0xFF & (atom >> 16));
m_storage.push_back(0xFF & (atom >> 24));
return *this;
}
I::OStream& ByteMemoryOutStream::operator<< (bool atom) {
IOStream& ByteMemoryOutStream::operator<< (uint64_t atom) {
for (int i = 0; i < 8; ++i) {
m_storage.push_back(0xFF & (atom >> (i * 8)));;
}
return *this;
}
IOStream& ByteMemoryOutStream::operator<< (bool atom) {
m_storage.push_back(atom ? 1 : 0);
return *this;
}
I::OStream& ByteMemoryOutStream::operator<< (char atom) {
IOStream& ByteMemoryOutStream::operator<< (char atom) {
m_storage.push_back(atom);
return *this;
}
I::OStream& ByteMemoryOutStream::operator<< (unsigned char atom) {
IOStream& ByteMemoryOutStream::operator<< (unsigned char atom) {
return *this << static_cast<char>(atom);
}
I::OStream& ByteMemoryOutStream::operator<< (short atom) {
IOStream& ByteMemoryOutStream::operator<< (short atom) {
static_assert(sizeof(short) == 2, "Expecting sizeof(short) == 2");
m_storage.push_back(0xFF & (atom));
m_storage.push_back(0xFF & (atom >> 8));
return *this;
}
I::OStream& ByteMemoryOutStream::operator<< (unsigned short atom) {
IOStream& ByteMemoryOutStream::operator<< (unsigned short atom) {
return *this << static_cast<short>(atom);
}
I::OStream& ByteMemoryOutStream::operator<< (int atom) {
IOStream& ByteMemoryOutStream::operator<< (int atom) {
static_assert(sizeof(int) == 4, "Expecting sizeof(int) == 4");
return *this << static_cast<uint32_t>(atom);
}
//I::OStream& ByteMemoryOutStream::operator<< (std::size_t atom) {
//IOStream& ByteMemoryOutStream::operator<< (std::size_t atom) {
// // NB: type truncated!
// return *this << static_cast<uint32_t>(atom);
//}
I::OStream& ByteMemoryOutStream::operator<< (float atom) {
IOStream& ByteMemoryOutStream::operator<< (float atom) {
static_assert(sizeof(float) == 4, "Expecting sizeof(float) == 4");
uint32_t tmp = 0u;
memcpy(&tmp, &atom, sizeof(float));
return *this << static_cast<uint32_t>(htonl(tmp));
}
I::OStream& ByteMemoryOutStream::operator<< (double atom) {
IOStream& ByteMemoryOutStream::operator<< (double atom) {
static_assert(sizeof(double) == 8, "Expecting sizeof(double) == 8");
uint32_t tmp[2] = {0u};
memcpy(tmp, &atom, sizeof(double));
@ -728,17 +792,16 @@ I::OStream& ByteMemoryOutStream::operator<< (double atom) {
*this << static_cast<uint32_t>(htonl(tmp[1]));
return *this;
}
I::OStream& ByteMemoryOutStream::operator<< (const std::string &str) {
IOStream& ByteMemoryOutStream::operator<< (const std::string &str) {
//*this << static_cast<std::size_t>(str.size()); // N.B. Put type explicitly
*this << static_cast<uint32_t>(str.size()); // N.B. Put type explicitly
for (auto c : str) *this << c;
return *this;
}
ByteMemoryInStream::ByteMemoryInStream(const std::vector<char> &data)
: m_storage(data) {
}
I::IStream& ByteMemoryInStream::operator>> (uint32_t &atom) {
IIStream& ByteMemoryInStream::operator>> (uint32_t &atom) {
check(sizeof(uint32_t));
uint8_t x[4];
x[0] = static_cast<uint8_t>(m_storage[m_idx++]);
@ -748,23 +811,38 @@ I::IStream& ByteMemoryInStream::operator>> (uint32_t &atom) {
atom = ((x[0]) | (x[1] << 8) | (x[2] << 16) | (x[3] << 24));
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (bool& atom) {
IIStream& ByteMemoryInStream::operator>> (bool& atom) {
check(sizeof(char));
atom = (m_storage[m_idx++] == 0) ? false : true;
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (char &atom) {
IIStream& ByteMemoryInStream::operator>> (std::vector<bool>::reference atom) {
check(sizeof(char));
atom = (m_storage[m_idx++] == 0) ? false : true;
return *this;
}
IIStream& ByteMemoryInStream::operator>> (char &atom) {
check(sizeof(char));
atom = m_storage[m_idx++];
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (unsigned char &atom) {
IIStream& ByteMemoryInStream::operator>> (uint64_t &atom) {
check(sizeof(uint64_t));
uint8_t x[8];
atom = 0;
for (int i = 0; i < 8; ++i) {
x[i] = static_cast<uint8_t>(m_storage[m_idx++]);
atom |= (static_cast<uint64_t>(x[i]) << (i * 8));
}
return *this;
}
IIStream& ByteMemoryInStream::operator>> (unsigned char &atom) {
char c{};
*this >> c;
atom = static_cast<unsigned char>(c);
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (short &atom) {
IIStream& ByteMemoryInStream::operator>> (short &atom) {
static_assert(sizeof(short) == 2, "Expecting sizeof(short) == 2");
check(sizeof(short));
uint8_t x[2];
@ -773,35 +851,35 @@ I::IStream& ByteMemoryInStream::operator>> (short &atom) {
atom = ((x[0]) | (x[1] << 8));
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (unsigned short &atom) {
IIStream& ByteMemoryInStream::operator>> (unsigned short &atom) {
short s{};
*this >> s;
atom = static_cast<unsigned short>(s);
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (int& atom) {
IIStream& ByteMemoryInStream::operator>> (int& atom) {
static_assert(sizeof(int) == 4, "Expecting sizeof(int) == 4");
atom = static_cast<int>(getU32());
return *this;
}
//I::IStream& ByteMemoryInStream::operator>> (std::size_t& atom) {
//IIStream& ByteMemoryInStream::operator>> (std::size_t& atom) {
// // NB. Type was truncated!
// atom = static_cast<std::size_t>(getU32());
// return *this;
//}
I::IStream& ByteMemoryInStream::operator>> (float& atom) {
IIStream& ByteMemoryInStream::operator>> (float& atom) {
static_assert(sizeof(float) == 4, "Expecting sizeof(float) == 4");
uint32_t tmp = ntohl(getU32());
memcpy(&atom, &tmp, sizeof(float));
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (double& atom) {
IIStream& ByteMemoryInStream::operator>> (double& atom) {
static_assert(sizeof(double) == 8, "Expecting sizeof(double) == 8");
uint32_t tmp[2] = {ntohl(getU32()), ntohl(getU32())};
memcpy(&atom, tmp, sizeof(double));
return *this;
}
I::IStream& ByteMemoryInStream::operator>> (std::string& str) {
IIStream& ByteMemoryInStream::operator>> (std::string& str) {
//std::size_t sz = 0u;
uint32_t sz = 0u;
*this >> sz;
@ -814,24 +892,31 @@ I::IStream& ByteMemoryInStream::operator>> (std::string& str) {
return *this;
}
GAPI_EXPORTS void serialize(I::OStream& os, const cv::GMetaArgs &ma) {
GAPI_EXPORTS std::unique_ptr<IIStream> detail::getInStream(const std::vector<char> &p) {
return std::unique_ptr<ByteMemoryInStream>(new ByteMemoryInStream(p));
}
GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca) {
os << ca;
}
GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma) {
os << ma;
}
GAPI_EXPORTS void serialize(I::OStream& os, const cv::GRunArgs &ra) {
GAPI_EXPORTS void serialize(IOStream& os, const cv::GRunArgs &ra) {
os << ra;
}
GAPI_EXPORTS GMetaArgs meta_args_deserialize(I::IStream& is) {
GAPI_EXPORTS GMetaArgs meta_args_deserialize(IIStream& is) {
GMetaArgs s;
is >> s;
return s;
}
GAPI_EXPORTS GRunArgs run_args_deserialize(I::IStream& is) {
GAPI_EXPORTS GRunArgs run_args_deserialize(IIStream& is) {
GRunArgs s;
is >> s;
return s;
}
} // namespace s11n
} // namespace gimpl
} // namespace gapi
} // namespace cv

View File

@ -9,19 +9,21 @@
#include <iostream>
#include <fstream>
#include <string.h>
#include <string>
#include <map>
#include <ade/util/iota_range.hpp> // used in the vector<</>>
#include "compiler/gmodel.hpp"
#include "opencv2/gapi/render/render_types.hpp"
#include "opencv2/gapi/s11n.hpp" // basic interfaces
#if (defined _WIN32 || defined _WIN64) && defined _MSC_VER
#pragma warning(disable: 4702)
#endif
namespace cv {
namespace gimpl {
namespace gapi {
namespace s11n {
struct GSerialized {
@ -31,168 +33,107 @@ struct GSerialized {
cv::gimpl::Protocol m_proto;
};
////////////////////////////////////////////////////////////////////////////////
// Stream interfaces, so far temporary
namespace I {
struct GAPI_EXPORTS OStream {
virtual ~OStream() = default;
// Define the native support for basic C++ types at the API level:
virtual OStream& operator<< (bool) = 0;
virtual OStream& operator<< (char) = 0;
virtual OStream& operator<< (unsigned char) = 0;
virtual OStream& operator<< (short) = 0;
virtual OStream& operator<< (unsigned short) = 0;
virtual OStream& operator<< (int) = 0;
//virtual OStream& operator<< (std::size_t) = 0;
virtual OStream& operator<< (uint32_t) = 0;
virtual OStream& operator<< (float) = 0;
virtual OStream& operator<< (double) = 0;
virtual OStream& operator<< (const std::string&) = 0;
};
struct GAPI_EXPORTS IStream {
virtual ~IStream() = default;
virtual IStream& operator>> (bool &) = 0;
virtual IStream& operator>> (char &) = 0;
virtual IStream& operator>> (unsigned char &) = 0;
virtual IStream& operator>> (short &) = 0;
virtual IStream& operator>> (unsigned short &) = 0;
virtual IStream& operator>> (int &) = 0;
virtual IStream& operator>> (float &) = 0;
virtual IStream& operator>> (double &) = 0;
//virtual IStream& operator>> (std::size_t &) = 0;
virtual IStream& operator >> (uint32_t &) = 0;
virtual IStream& operator>> (std::string &) = 0;
};
} // namespace I
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// S11N operators
// Note: operators for basic types are defined in IStream/OStream
// OpenCV types ////////////////////////////////////////////////////////////////
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Point &pt);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Point &pt);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Size &sz);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Size &sz);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Rect &rc);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Rect &rc);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Scalar &s);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Scalar &s);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Mat &m);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Mat &m);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Text &t);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Text &t);
GAPI_EXPORTS I::OStream& operator<< (I::OStream&, const cv::gapi::wip::draw::FText &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream&, cv::gapi::wip::draw::FText &);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Circle &c);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Circle &c);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Rect &r);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Rect &r);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Image &i);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Image &i);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Mosaic &m);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Mosaic &m);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Poly &p);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Poly &p);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Line &l);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Line &l);
// Note: operators for basic types are defined in IIStream/IOStream
// G-API types /////////////////////////////////////////////////////////////////
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::util::monostate );
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::util::monostate &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::GShape shape);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GShape &shape);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::util::monostate );
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::util::monostate &);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::detail::ArgKind k);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::ArgKind &k);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::GShape shape);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GShape &shape);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::detail::OpaqueKind k);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::OpaqueKind &k);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::detail::ArgKind k);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::ArgKind &k);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::gimpl::Data::Storage s);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Data::Storage &s);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::detail::OpaqueKind k);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::OpaqueKind &k);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::DataObjectCounter &c);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::DataObjectCounter &c);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::gimpl::Data::Storage s);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Data::Storage &s);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::Protocol &p);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Protocol &p);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::DataObjectCounter &c);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::DataObjectCounter &c);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GArg &arg);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GArg &arg);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::Protocol &p);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Protocol &p);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GArg &arg);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GArg &arg);
//Forward declaration
//I::OStream& operator<< (I::OStream& os, const cv::GRunArg &arg);
//I::IStream& operator>> (I::IStream& is, cv::GRunArg &arg);
//IOStream& operator<< (IOStream& os, const cv::GRunArg &arg);
//IIStream& operator>> (IIStream& is, cv::GRunArg &arg);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GKernel &k);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GKernel &k);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GKernel &k);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GKernel &k);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GMatDesc &d);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GMatDesc &d);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GMatDesc &d);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GMatDesc &d);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GScalarDesc &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GScalarDesc &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GScalarDesc &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GScalarDesc &);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GOpaqueDesc &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GOpaqueDesc &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GOpaqueDesc &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GOpaqueDesc &);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GArrayDesc &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GArrayDesc &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GArrayDesc &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GArrayDesc &);
#if !defined(GAPI_STANDALONE)
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::UMat &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::UMat &);
#endif // !defined(GAPI_STANDALONE)
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GFrameDesc &);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GFrameDesc &);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::IStreamSource::Ptr &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::IStreamSource::Ptr &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::RcDesc &rc);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::RcDesc &rc);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::detail::VectorRef &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::VectorRef &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::Op &op);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Op &op);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::detail::OpaqueRef &);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::OpaqueRef &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::Data &op);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Data &op);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::RcDesc &rc);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::RcDesc &rc);
// Render types ////////////////////////////////////////////////////////////////
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::Op &op);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Op &op);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Text &t);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Text &t);
GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::Data &op);
GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Data &op);
GAPI_EXPORTS IOStream& operator<< (IOStream&, const cv::gapi::wip::draw::FText &);
GAPI_EXPORTS IIStream& operator>> (IIStream&, cv::gapi::wip::draw::FText &);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Circle &c);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Circle &c);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Rect &r);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Rect &r);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Image &i);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Image &i);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Mosaic &m);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Mosaic &m);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Poly &p);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Poly &p);
GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Line &l);
GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Line &l);
// The top-level serialization routine.
// Note it is just a single function which takes a GModel and a list of nodes
// and writes the data to the stream (recursively)
GAPI_EXPORTS void serialize( I::OStream& os
GAPI_EXPORTS void serialize( IOStream& os
, const ade::Graph &g
, const std::vector<ade::NodeHandle> &nodes);
// The top-level serialization routine.
// Note it is just a single function which takes a GModel and a list of nodes
// and writes the data to the stream (recursively)
GAPI_EXPORTS void serialize( I::OStream& os
GAPI_EXPORTS void serialize( IOStream& os
, const ade::Graph &g
, const cv::gimpl::Protocol &p
, const std::vector<ade::NodeHandle> &nodes);
@ -214,160 +155,73 @@ GAPI_EXPORTS void serialize( I::OStream& os
// Summarizing, the `deserialize()` happens *once per GComputation* immediately
// during the cv::gapi::deserialize<GComputation>(), and `reconstruct()` happens
// on every compilation process issued for this GComputation.
GAPI_EXPORTS GSerialized deserialize(I::IStream& is);
GAPI_EXPORTS GSerialized deserialize(IIStream& is);
GAPI_EXPORTS void reconstruct(const GSerialized &s, ade::Graph &g);
// Legacy //////////////////////////////////////////////////////////////////////
// Generic: unordered_map serialization ////////////////////////////////////////
template<typename K, typename V>
I::OStream& operator<< (I::OStream& os, const std::unordered_map<K, V> &m) {
//const std::size_t sz = m.size(); // explicitly specify type
const uint32_t sz = (uint32_t)m.size(); // explicitly specify type
os << sz;
for (auto &&it : m) os << it.first << it.second;
return os;
}
template<typename K, typename V>
I::IStream& operator>> (I::IStream& is, std::unordered_map<K, V> &m) {
m.clear();
//std::size_t sz = 0u;
uint32_t sz = 0u;
is >> sz;
if (sz != 0u) {
for (auto &&i : ade::util::iota(sz)) {
(void) i;
K k{};
V v{};
is >> k >> v;
m.insert({k,v});
}
GAPI_Assert(sz == m.size());
}
return is;
}
// Generic: variant serialization //////////////////////////////////////////////
namespace detail { // FIXME: breaks old code
template<typename V>
I::OStream& put_v(I::OStream&, const V&, std::size_t) {
GAPI_Assert(false && "variant>>: requested index is invalid");
};
template<typename V, typename X, typename... Xs>
I::OStream& put_v(I::OStream& os, const V& v, std::size_t x) {
return (x == 0u)
? os << cv::util::get<X>(v)
: put_v<V, Xs...>(os, v, x-1);
}
template<typename V>
I::IStream& get_v(I::IStream&, V&, std::size_t, std::size_t) {
GAPI_Assert(false && "variant<<: requested index is invalid");
}
template<typename V, typename X, typename... Xs>
I::IStream& get_v(I::IStream& is, V& v, std::size_t i, std::size_t gi) {
if (i == gi) {
X x{};
is >> x;
v = std::move(x);
return is;
} else return get_v<V, Xs...>(is, v, i+1, gi);
}
} // namespace detail FIXME: breaks old code
template<typename... Ts>
I::OStream& operator<< (I::OStream& os, const cv::util::variant<Ts...> &v) {
os << (uint32_t)v.index();
return detail::put_v<cv::util::variant<Ts...>, Ts...>(os, v, v.index());
}
template<typename... Ts>
I::IStream& operator>> (I::IStream& is, cv::util::variant<Ts...> &v) {
int idx = -1;
is >> idx;
GAPI_Assert(idx >= 0 && idx < (int)sizeof...(Ts));
return detail::get_v<cv::util::variant<Ts...>, Ts...>(is, v, 0u, idx);
}
// Generic: vector serialization ///////////////////////////////////////////////
// Moved here to fix CLang issues https://clang.llvm.org/compatibility.html
// Unqualified lookup in templates
template<typename T>
I::OStream& operator<< (I::OStream& os, const std::vector<T> &ts) {
//const std::size_t sz = ts.size(); // explicitly specify type
const uint32_t sz = (uint32_t)ts.size(); // explicitly specify type
os << sz;
for (auto &&v : ts) os << v;
return os;
}
template<typename T>
I::IStream& operator >> (I::IStream& is, std::vector<T> &ts) {
//std::size_t sz = 0u;
uint32_t sz = 0u;
is >> sz;
if (sz == 0u) {
ts.clear();
}
else {
ts.resize(sz);
for (auto &&i : ade::util::iota(sz)) is >> ts[i];
}
return is;
}
// FIXME: Basic Stream implementaions //////////////////////////////////////////
// Basic in-memory stream implementations.
class GAPI_EXPORTS ByteMemoryOutStream final: public I::OStream {
class GAPI_EXPORTS ByteMemoryOutStream final: public IOStream {
std::vector<char> m_storage;
//virtual I::OStream& operator << (uint32_t) override;
//virtual I::OStream& operator<< (uint32_t) final;
//virtual IOStream& operator << (uint32_t) override;
//virtual IOStream& operator<< (uint32_t) final;
public:
const std::vector<char>& data() const;
virtual I::OStream& operator<< (bool) override;
virtual I::OStream& operator<< (char) override;
virtual I::OStream& operator<< (unsigned char) override;
virtual I::OStream& operator<< (short) override;
virtual I::OStream& operator<< (unsigned short) override;
virtual I::OStream& operator<< (int) override;
//virtual I::OStream& operator<< (std::size_t) override;
virtual I::OStream& operator<< (float) override;
virtual I::OStream& operator<< (double) override;
virtual I::OStream& operator<< (const std::string&) override;
virtual I::OStream& operator<< (uint32_t) override;
virtual IOStream& operator<< (bool) override;
virtual IOStream& operator<< (char) override;
virtual IOStream& operator<< (unsigned char) override;
virtual IOStream& operator<< (short) override;
virtual IOStream& operator<< (unsigned short) override;
virtual IOStream& operator<< (int) override;
//virtual IOStream& operator<< (std::size_t) override;
virtual IOStream& operator<< (float) override;
virtual IOStream& operator<< (double) override;
virtual IOStream& operator<< (const std::string&) override;
virtual IOStream& operator<< (uint32_t) override;
virtual IOStream& operator<< (uint64_t) override;
};
class GAPI_EXPORTS ByteMemoryInStream final: public I::IStream {
class GAPI_EXPORTS ByteMemoryInStream final: public IIStream {
const std::vector<char>& m_storage;
size_t m_idx = 0u;
void check(std::size_t n) { (void) n; GAPI_DbgAssert(m_idx+n-1 < m_storage.size()); }
uint32_t getU32() { uint32_t v{}; *this >> v; return v; };
//virtual I::IStream& operator>> (uint32_t &) final;
//virtual IIStream& operator>> (uint32_t &) final;
public:
explicit ByteMemoryInStream(const std::vector<char> &data);
virtual I::IStream& operator>> (bool &) override;
virtual I::IStream& operator>> (char &) override;
virtual I::IStream& operator>> (unsigned char &) override;
virtual I::IStream& operator>> (short &) override;
virtual I::IStream& operator>> (unsigned short &) override;
virtual I::IStream& operator>> (int &) override;
virtual I::IStream& operator>> (float &) override;
virtual I::IStream& operator>> (double &) override;
//virtual I::IStream& operator>> (std::size_t &) override;
virtual I::IStream& operator >> (uint32_t &) override;
virtual I::IStream& operator>> (std::string &) override;
virtual IIStream& operator>> (bool &) override;
virtual IIStream& operator>> (std::vector<bool>::reference) override;
virtual IIStream& operator>> (char &) override;
virtual IIStream& operator>> (unsigned char &) override;
virtual IIStream& operator>> (short &) override;
virtual IIStream& operator>> (unsigned short &) override;
virtual IIStream& operator>> (int &) override;
virtual IIStream& operator>> (float &) override;
virtual IIStream& operator>> (double &) override;
//virtual IIStream& operator>> (std::size_t &) override;
virtual IIStream& operator >> (uint32_t &) override;
virtual IIStream& operator >> (uint64_t &) override;
virtual IIStream& operator>> (std::string &) override;
};
GAPI_EXPORTS void serialize(I::OStream& os, const cv::GMetaArgs &ma);
GAPI_EXPORTS void serialize(I::OStream& os, const cv::GRunArgs &ra);
GAPI_EXPORTS GMetaArgs meta_args_deserialize(I::IStream& is);
GAPI_EXPORTS GRunArgs run_args_deserialize(I::IStream& is);
namespace detail {
GAPI_EXPORTS std::unique_ptr<IIStream> getInStream(const std::vector<char> &p);
} // namespace detail
GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca);
GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma);
GAPI_EXPORTS void serialize(IOStream& os, const cv::GRunArgs &ra);
GAPI_EXPORTS GMetaArgs meta_args_deserialize(IIStream& is);
GAPI_EXPORTS GRunArgs run_args_deserialize(IIStream& is);
} // namespace s11n
} // namespace gimpl
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_COMMON_SERIALIZATION_HPP

View File

@ -128,9 +128,10 @@ cv::GArg cv::gimpl::GCPUExecutable::packArg(const GArg &arg)
// No API placeholders allowed at this point
// FIXME: this check has to be done somewhere in compilation stage.
GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT
&& arg.kind != cv::detail::ArgKind::GSCALAR
&& arg.kind != cv::detail::ArgKind::GARRAY
&& arg.kind != cv::detail::ArgKind::GOPAQUE);
&& arg.kind != cv::detail::ArgKind::GSCALAR
&& arg.kind != cv::detail::ArgKind::GARRAY
&& arg.kind != cv::detail::ArgKind::GOPAQUE
&& arg.kind != cv::detail::ArgKind::GFRAME);
if (arg.kind != cv::detail::ArgKind::GOBJREF)
{
@ -150,6 +151,7 @@ cv::GArg cv::gimpl::GCPUExecutable::packArg(const GArg &arg)
// (and constructed by either bindIn/Out or resetInternal)
case GShape::GARRAY: return GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));
case GShape::GOPAQUE: return GArg(m_res.slot<cv::detail::OpaqueRef>().at(ref.id));
case GShape::GFRAME: return GArg(m_res.slot<cv::MediaFrame>().at(ref.id));
default:
util::throw_error(std::logic_error("Unsupported GShape type"));
break;
@ -276,4 +278,8 @@ void cv::gimpl::GCPUExecutable::run(std::vector<InObj> &&input_objs,
} // for(m_script)
for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
// In/Out args clean-up is mandatory now with RMat
for (auto &it : input_objs) magazine::unbind(m_res, it.first);
for (auto &it : output_objs) magazine::unbind(m_res, it.first);
}

View File

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
#include "precomp.hpp"
@ -342,6 +342,14 @@ GAPI_OCV_KERNEL(GCPUSum, cv::gapi::core::GSum)
}
};
GAPI_OCV_KERNEL(GCPUCountNonZero, cv::gapi::core::GCountNonZero)
{
static void run(const cv::Mat& in, int& out)
{
out = cv::countNonZero(in);
}
};
GAPI_OCV_KERNEL(GCPUAddW, cv::gapi::core::GAddW)
{
static void run(const cv::Mat& in1, double alpha, const cv::Mat& in2, double beta, double gamma, int dtype, cv::Mat& out)
@ -502,14 +510,6 @@ GAPI_OCV_KERNEL(GCPUCrop, cv::gapi::core::GCrop)
}
};
GAPI_OCV_KERNEL(GCPUCopy, cv::gapi::core::GCopy)
{
static void run(const cv::Mat& in, cv::Mat& out)
{
in.copyTo(out);
}
};
GAPI_OCV_KERNEL(GCPUConcatHor, cv::gapi::core::GConcatHor)
{
static void run(const cv::Mat& in1, const cv::Mat& in2, cv::Mat& out)
@ -577,6 +577,63 @@ GAPI_OCV_KERNEL(GCPUWarpAffine, cv::gapi::core::GWarpAffine)
}
};
GAPI_OCV_KERNEL(GCPUKMeansND, cv::gapi::core::GKMeansND)
{
static void run(const cv::Mat& data, const int K, const cv::Mat& inBestLabels,
const cv::TermCriteria& criteria, const int attempts,
const cv::KmeansFlags flags,
double& compactness, cv::Mat& outBestLabels, cv::Mat& centers)
{
if (flags & cv::KMEANS_USE_INITIAL_LABELS)
{
inBestLabels.copyTo(outBestLabels);
}
compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers);
}
};
GAPI_OCV_KERNEL(GCPUKMeansNDNoInit, cv::gapi::core::GKMeansNDNoInit)
{
static void run(const cv::Mat& data, const int K, const cv::TermCriteria& criteria,
const int attempts, const cv::KmeansFlags flags,
double& compactness, cv::Mat& outBestLabels, cv::Mat& centers)
{
compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers);
}
};
GAPI_OCV_KERNEL(GCPUKMeans2D, cv::gapi::core::GKMeans2D)
{
static void run(const std::vector<cv::Point2f>& data, const int K,
const std::vector<int>& inBestLabels, const cv::TermCriteria& criteria,
const int attempts, const cv::KmeansFlags flags,
double& compactness, std::vector<int>& outBestLabels,
std::vector<cv::Point2f>& centers)
{
if (flags & cv::KMEANS_USE_INITIAL_LABELS)
{
outBestLabels = inBestLabels;
}
compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers);
}
};
GAPI_OCV_KERNEL(GCPUKMeans3D, cv::gapi::core::GKMeans3D)
{
static void run(const std::vector<cv::Point3f>& data, const int K,
const std::vector<int>& inBestLabels, const cv::TermCriteria& criteria,
const int attempts, const cv::KmeansFlags flags,
double& compactness, std::vector<int>& outBestLabels,
std::vector<cv::Point3f>& centers)
{
if (flags & cv::KMEANS_USE_INITIAL_LABELS)
{
outBestLabels = inBestLabels;
}
compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers);
}
};
GAPI_OCV_KERNEL(GCPUParseSSDBL, cv::gapi::nn::parsers::GParseSSDBL)
{
static void run(const cv::Mat& in_ssd_result,
@ -617,7 +674,7 @@ GAPI_OCV_KERNEL(GCPUParseYolo, cv::gapi::nn::parsers::GParseYolo)
}
};
GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize)
GAPI_OCV_KERNEL(GCPUSize, cv::gapi::streaming::GSize)
{
static void run(const cv::Mat& in, cv::Size& out)
{
@ -626,7 +683,7 @@ GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize)
}
};
GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::core::GSizeR)
GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::streaming::GSizeR)
{
static void run(const cv::Rect& in, cv::Size& out)
{
@ -679,6 +736,7 @@ cv::gapi::GKernelPackage cv::gapi::core::cpu::kernels()
, GCPUAbsDiff
, GCPUAbsDiffC
, GCPUSum
, GCPUCountNonZero
, GCPUAddW
, GCPUNormL1
, GCPUNormL2
@ -696,7 +754,6 @@ cv::gapi::GKernelPackage cv::gapi::core::cpu::kernels()
, GCPURemap
, GCPUFlip
, GCPUCrop
, GCPUCopy
, GCPUConcatHor
, GCPUConcatVert
, GCPULUT
@ -705,6 +762,10 @@ cv::gapi::GKernelPackage cv::gapi::core::cpu::kernels()
, GCPUNormalize
, GCPUWarpPerspective
, GCPUWarpAffine
, GCPUKMeansND
, GCPUKMeansNDNoInit
, GCPUKMeans2D
, GCPUKMeans3D
, GCPUParseSSDBL
, GOCVParseSSD
, GCPUParseYolo

View File

@ -145,6 +145,16 @@ GAPI_OCV_KERNEL(GCPUDilate, cv::gapi::imgproc::GDilate)
}
};
GAPI_OCV_KERNEL(GCPUMorphologyEx, cv::gapi::imgproc::GMorphologyEx)
{
static void run(const cv::Mat &in, const cv::MorphTypes op, const cv::Mat &kernel,
const cv::Point &anchor, const int iterations,
const cv::BorderTypes borderType, const cv::Scalar &borderValue, cv::Mat &out)
{
cv::morphologyEx(in, out, op, kernel, anchor, iterations, borderType, borderValue);
}
};
GAPI_OCV_KERNEL(GCPUSobel, cv::gapi::imgproc::GSobel)
{
static void run(const cv::Mat& in, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType,
@ -211,6 +221,182 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures)
}
};
GAPI_OCV_KERNEL(GCPUFindContours, cv::gapi::imgproc::GFindContours)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method, const cv::Point& offset,
std::vector<std::vector<cv::Point>> &outConts)
{
cv::findContours(image, outConts, mode, method, offset);
}
};
GAPI_OCV_KERNEL(GCPUFindContoursNoOffset, cv::gapi::imgproc::GFindContoursNoOffset)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method,
std::vector<std::vector<cv::Point>> &outConts)
{
cv::findContours(image, outConts, mode, method);
}
};
GAPI_OCV_KERNEL(GCPUFindContoursH, cv::gapi::imgproc::GFindContoursH)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method, const cv::Point& offset,
std::vector<std::vector<cv::Point>> &outConts, std::vector<cv::Vec4i> &outHier)
{
cv::findContours(image, outConts, outHier, mode, method, offset);
}
};
GAPI_OCV_KERNEL(GCPUFindContoursHNoOffset, cv::gapi::imgproc::GFindContoursHNoOffset)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method,
std::vector<std::vector<cv::Point>> &outConts, std::vector<cv::Vec4i> &outHier)
{
cv::findContours(image, outConts, outHier, mode, method);
}
};
GAPI_OCV_KERNEL(GCPUBoundingRectMat, cv::gapi::imgproc::GBoundingRectMat)
{
static void run(const cv::Mat& in, cv::Rect& out)
{
out = cv::boundingRect(in);
}
};
GAPI_OCV_KERNEL(GCPUBoundingRectVector32S, cv::gapi::imgproc::GBoundingRectVector32S)
{
static void run(const std::vector<cv::Point2i>& in, cv::Rect& out)
{
out = cv::boundingRect(in);
}
};
GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVector32F)
{
static void run(const std::vector<cv::Point2f>& in, cv::Rect& out)
{
out = cv::boundingRect(in);
}
};
// cv::fitLine wrappers. The 2D flavours output a cv::Vec4f and the 3D
// flavours a cv::Vec6f (see the cv::fitLine documentation for the
// (direction, point-on-line) layout of the vector). The kernels differ
// only in the point container/precision they accept.
GAPI_OCV_KERNEL(GCPUFitLine2DMat, cv::gapi::imgproc::GFitLine2DMat)
{
    // 2D fit over points stored in a cv::Mat.
    static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param,
                    const double reps, const double aeps, cv::Vec4f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};

GAPI_OCV_KERNEL(GCPUFitLine2DVector32S, cv::gapi::imgproc::GFitLine2DVector32S)
{
    // 2D fit over integer points.
    static void run(const std::vector<cv::Point2i>& in, const cv::DistanceTypes distType,
                    const double param, const double reps, const double aeps, cv::Vec4f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};

GAPI_OCV_KERNEL(GCPUFitLine2DVector32F, cv::gapi::imgproc::GFitLine2DVector32F)
{
    // 2D fit over single-precision points.
    static void run(const std::vector<cv::Point2f>& in, const cv::DistanceTypes distType,
                    const double param, const double reps, const double aeps, cv::Vec4f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};

GAPI_OCV_KERNEL(GCPUFitLine2DVector64F, cv::gapi::imgproc::GFitLine2DVector64F)
{
    // 2D fit over double-precision points.
    static void run(const std::vector<cv::Point2d>& in, const cv::DistanceTypes distType,
                    const double param, const double reps, const double aeps, cv::Vec4f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};

GAPI_OCV_KERNEL(GCPUFitLine3DMat, cv::gapi::imgproc::GFitLine3DMat)
{
    // 3D fit over points stored in a cv::Mat.
    static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param,
                    const double reps, const double aeps, cv::Vec6f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};

GAPI_OCV_KERNEL(GCPUFitLine3DVector32S, cv::gapi::imgproc::GFitLine3DVector32S)
{
    // 3D fit over integer points.
    static void run(const std::vector<cv::Point3i>& in, const cv::DistanceTypes distType,
                    const double param, const double reps, const double aeps, cv::Vec6f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};

GAPI_OCV_KERNEL(GCPUFitLine3DVector32F, cv::gapi::imgproc::GFitLine3DVector32F)
{
    // 3D fit over single-precision points.
    static void run(const std::vector<cv::Point3f>& in, const cv::DistanceTypes distType,
                    const double param, const double reps, const double aeps, cv::Vec6f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};

GAPI_OCV_KERNEL(GCPUFitLine3DVector64F, cv::gapi::imgproc::GFitLine3DVector64F)
{
    // 3D fit over double-precision points.
    static void run(const std::vector<cv::Point3d>& in, const cv::DistanceTypes distType,
                    const double param, const double reps, const double aeps, cv::Vec6f& out)
    {
        cv::fitLine(in, out, distType, param, reps, aeps);
    }
};
// Color-space conversion kernels: thin wrappers over cv::cvtColor with a
// fixed conversion code each.
GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB)
{
    // BGR -> RGB channel reorder.
    static void run(const cv::Mat& in, cv::Mat &out)
    {
        cv::cvtColor(in, out, cv::COLOR_BGR2RGB);
    }
};

GAPI_OCV_KERNEL(GCPUBGR2I420, cv::gapi::imgproc::GBGR2I420)
{
    // BGR -> planar YUV 4:2:0 (I420).
    static void run(const cv::Mat& in, cv::Mat &out)
    {
        cv::cvtColor(in, out, cv::COLOR_BGR2YUV_I420);
    }
};

GAPI_OCV_KERNEL(GCPURGB2I420, cv::gapi::imgproc::GRGB2I420)
{
    // RGB -> planar YUV 4:2:0 (I420).
    static void run(const cv::Mat& in, cv::Mat &out)
    {
        cv::cvtColor(in, out, cv::COLOR_RGB2YUV_I420);
    }
};

GAPI_OCV_KERNEL(GCPUI4202BGR, cv::gapi::imgproc::GI4202BGR)
{
    // Planar YUV 4:2:0 (I420) -> BGR.
    static void run(const cv::Mat& in, cv::Mat &out)
    {
        cv::cvtColor(in, out, cv::COLOR_YUV2BGR_I420);
    }
};

GAPI_OCV_KERNEL(GCPUI4202RGB, cv::gapi::imgproc::GI4202RGB)
{
    // Planar YUV 4:2:0 (I420) -> RGB.
    static void run(const cv::Mat& in, cv::Mat &out)
    {
        cv::cvtColor(in, out, cv::COLOR_YUV2RGB_I420);
    }
};
GAPI_OCV_KERNEL(GCPURGB2YUV, cv::gapi::imgproc::GRGB2YUV)
{
static void run(const cv::Mat& in, cv::Mat &out)
@ -438,6 +624,7 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels()
, GCPUMedianBlur
, GCPUErode
, GCPUDilate
, GCPUMorphologyEx
, GCPUSobel
, GCPUSobelXY
, GCPULaplacian
@ -445,8 +632,28 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels()
, GCPUCanny
, GCPUGoodFeatures
, GCPUEqualizeHist
, GCPUFindContours
, GCPUFindContoursNoOffset
, GCPUFindContoursH
, GCPUFindContoursHNoOffset
, GCPUBGR2RGB
, GCPURGB2YUV
, GCPUBoundingRectMat
, GCPUBoundingRectVector32S
, GCPUBoundingRectVector32F
, GCPUFitLine2DMat
, GCPUFitLine2DVector32S
, GCPUFitLine2DVector32F
, GCPUFitLine2DVector64F
, GCPUFitLine3DMat
, GCPUFitLine3DVector32S
, GCPUFitLine3DVector32F
, GCPUFitLine3DVector64F
, GCPUYUV2RGB
, GCPUBGR2I420
, GCPURGB2I420
, GCPUI4202BGR
, GCPUI4202RGB
, GCPUNV12toRGB
, GCPUNV12toBGR
, GCPURGB2Lab

View File

@ -41,6 +41,11 @@ cv::detail::OpaqueRef& cv::GCPUContext::outOpaqueRef(int output)
return util::get<cv::detail::OpaqueRef>(m_results.at(output));
}
// Returns a reference to the MediaFrame bound to output slot #output.
// For GFrame outputs m_results stores a raw MediaFrame pointer, hence
// the dereference.
cv::MediaFrame& cv::GCPUContext::outFrame(int output)
{
    return *util::get<cv::MediaFrame*>(m_results.at(output));
}
// Default constructor: builds an empty kernel object (no members are
// initialized here; NOTE(review): presumably the callable is assigned
// elsewhere -- confirm against the header).
cv::GCPUKernel::GCPUKernel()
{
}

View File

@ -80,12 +80,109 @@ GAPI_OCV_KERNEL(GCPUCalcOptFlowLKForPyr, cv::gapi::video::GCalcOptFlowLKForPyr)
}
};
// Stateful background-subtraction kernel: the cv::BackgroundSubtractor
// instance is created once in setup() and reused across frames by run().
GAPI_OCV_KERNEL_ST(GCPUBackgroundSubtractor,
                   cv::gapi::video::GBackgroundSubtractor,
                   cv::BackgroundSubtractor)
{
    // Creates the subtractor selected by bsParams.operation (MOG2 or KNN).
    // Any other operation value leaves `state` empty, so the assert below
    // fires.
    static void setup(const cv::GMatDesc&, const cv::gapi::video::BackgroundSubtractorParams& bsParams,
                      std::shared_ptr<cv::BackgroundSubtractor>& state,
                      const cv::GCompileArgs&)
    {
        if (bsParams.operation == cv::gapi::video::TYPE_BS_MOG2)
            state = cv::createBackgroundSubtractorMOG2(bsParams.history,
                                                       bsParams.threshold,
                                                       bsParams.detectShadows);
        else if (bsParams.operation == cv::gapi::video::TYPE_BS_KNN)
            state = cv::createBackgroundSubtractorKNN(bsParams.history,
                                                      bsParams.threshold,
                                                      bsParams.detectShadows);

        GAPI_Assert(state);
    }

    // Feeds the next frame to the subtractor; `out` receives the
    // foreground mask.
    static void run(const cv::Mat& in, const cv::gapi::video::BackgroundSubtractorParams& bsParams,
                    cv::Mat &out, cv::BackgroundSubtractor& state)
    {
        state.apply(in, out, bsParams.learningRate);
    }
};
// Stateful Kalman-filter kernel (variant WITH a control input). A
// cv::KalmanFilter is allocated and initialized from KalmanParams once
// in setup() and then advanced per call in run().
GAPI_OCV_KERNEL_ST(GCPUKalmanFilter, cv::gapi::video::GKalmanFilter, cv::KalmanFilter)
{
    static void setup(const cv::GMatDesc&, const cv::GOpaqueDesc&,
                      const cv::GMatDesc&, const cv::gapi::KalmanParams& kfParams,
                      std::shared_ptr<cv::KalmanFilter> &state, const cv::GCompileArgs&)
    {
        // Sizes: dynamic params from the transition matrix, measurement
        // params from the measurement matrix, control params from the
        // control matrix columns.
        state = std::make_shared<cv::KalmanFilter>(kfParams.transitionMatrix.rows, kfParams.measurementMatrix.rows,
                                                   kfParams.controlMatrix.cols, kfParams.transitionMatrix.type());
        // initial state
        state->statePost = kfParams.state;
        state->errorCovPost = kfParams.errorCov;

        // dynamic system initialization
        state->controlMatrix = kfParams.controlMatrix;
        state->measurementMatrix = kfParams.measurementMatrix;
        state->transitionMatrix = kfParams.transitionMatrix;
        state->processNoiseCov = kfParams.processNoiseCov;
        state->measurementNoiseCov = kfParams.measurementNoiseCov;
    }

    // One filter step: predict with the control vector; output the
    // corrected estimate when a measurement is available, otherwise the
    // raw prediction.
    static void run(const cv::Mat& measurements, bool haveMeasurement,
                    const cv::Mat& control, const cv::gapi::KalmanParams&,
                    cv::Mat &out, cv::KalmanFilter& state)
    {
        cv::Mat pre = state.predict(control);

        if (haveMeasurement)
            state.correct(measurements).copyTo(out);
        else
            pre.copyTo(out);
    }
};
// Stateful Kalman-filter kernel (variant WITHOUT a control input): the
// filter is created with zero control params and predict() is called
// with no argument.
GAPI_OCV_KERNEL_ST(GCPUKalmanFilterNoControl, cv::gapi::video::GKalmanFilterNoControl, cv::KalmanFilter)
{
    static void setup(const cv::GMatDesc&, const cv::GOpaqueDesc&,
                      const cv::gapi::KalmanParams& kfParams,
                      std::shared_ptr<cv::KalmanFilter> &state,
                      const cv::GCompileArgs&)
    {
        // controlParams == 0: no control matrix in this variant.
        state = std::make_shared<cv::KalmanFilter>(kfParams.transitionMatrix.rows, kfParams.measurementMatrix.rows,
                                                   0, kfParams.transitionMatrix.type());
        // initial state
        state->statePost = kfParams.state;
        state->errorCovPost = kfParams.errorCov;

        // dynamic system initialization
        state->measurementMatrix = kfParams.measurementMatrix;
        state->transitionMatrix = kfParams.transitionMatrix;
        state->processNoiseCov = kfParams.processNoiseCov;
        state->measurementNoiseCov = kfParams.measurementNoiseCov;
    }

    // One filter step: corrected estimate if a measurement arrived,
    // otherwise the prediction.
    static void run(const cv::Mat& measurements, bool haveMeasurement,
                    const cv::gapi::KalmanParams&, cv::Mat &out,
                    cv::KalmanFilter& state)
    {
        cv::Mat pre = state.predict();

        if (haveMeasurement)
            state.correct(measurements).copyTo(out);
        else
            pre.copyTo(out);
    }
};
// Assembles the OpenCV-CPU implementations of the G-API video kernels
// into one package. The package is built once (function-local static)
// and shared by all callers.
cv::gapi::GKernelPackage cv::gapi::video::cpu::kernels()
{
    static auto pkg = cv::gapi::kernels
        < GCPUBuildOptFlowPyramid
        , GCPUCalcOptFlowLK
        , GCPUCalcOptFlowLKForPyr
        , GCPUBackgroundSubtractor
        , GCPUKalmanFilter
        , GCPUKalmanFilterNoControl
        >();
    return pkg;
}

View File

@ -246,6 +246,28 @@ void parseSSD(const cv::Mat& in_ssd_result,
}
}
// Validates the dimensions of a YOLO network output tensor.
// Two layouts are accepted (leading dimensions, if any, must be 1):
//  - [1 x ... x] 13 x 13 x N, with N a multiple of 5 (boxes packed
//    along the last axis);
//  - [1 x ... x] N, a fully flattened output, with N a multiple of
//    5*13*13.
// Any violation triggers GAPI_Assert.
static void checkYoloDims(const MatSize& dims) {
    const auto d = dims.dims();
    GAPI_Assert(d >= 2);
    // Accept 1x13x13xN and 13x13xN
    // (the if-condition already establishes dims[d-2] == 13, so no
    // separate assert for it is needed).
    if (d >= 3 && dims[d-2] == 13) {
        GAPI_Assert(dims[d-1] % 5 == 0);
        GAPI_Assert(dims[d-3] == 13);
        for (int i = 0; i < d-3; i++) {
            GAPI_Assert(dims[i] == 1);
        }
        return;
    }
    // Accept 1x1x1xN, 1x1xN, 1xN
    GAPI_Assert(dims[d-1] % (5*13*13) == 0);
    for (int i = 0; i < d-1; i++) {
        GAPI_Assert(dims[i] == 1);
    }
}
void parseYolo(const cv::Mat& in_yolo_result,
const cv::Size& in_size,
const float confidence_threshold,
@ -255,12 +277,12 @@ void parseYolo(const cv::Mat& in_yolo_result,
std::vector<int>& out_labels)
{
const auto& dims = in_yolo_result.size;
GAPI_Assert(dims.dims() == 4);
GAPI_Assert(dims[0] == 1);
GAPI_Assert(dims[1] == 13);
GAPI_Assert(dims[2] == 13);
GAPI_Assert(dims[3] % 5 == 0); // 5 boxes
const auto num_classes = dims[3] / 5 - 5;
checkYoloDims(dims);
int acc = 1;
for (int i = 0; i < dims.dims(); i++) {
acc *= dims[i];
}
const auto num_classes = acc/(5*13*13)-5;
GAPI_Assert(num_classes > 0);
GAPI_Assert(0 < nms_threshold && nms_threshold <= 1);
out_boxes.clear();

View File

@ -952,7 +952,7 @@ namespace
using namespace cv::gimpl;
GModel::Graph g(graph);
GFluidModel fg(graph);
for (const auto node : g.nodes())
for (const auto& node : g.nodes())
{
if (g.metadata(node).get<NodeType>().t == NodeType::DATA)
{
@ -1243,41 +1243,25 @@ void cv::gimpl::GFluidExecutable::reshape(ade::Graph &g, const GCompileArgs &arg
// FIXME: Document what it does
void cv::gimpl::GFluidExecutable::bindInArg(const cv::gimpl::RcDesc &rc, const GRunArg &arg)
{
switch (rc.shape)
{
case GShape::GMAT: m_buffers[m_id_map.at(rc.id)].priv().bindTo(util::get<cv::Mat>(arg), true); break;
case GShape::GSCALAR: m_res.slot<cv::Scalar>()[rc.id] = util::get<cv::Scalar>(arg); break;
case GShape::GARRAY: m_res.slot<cv::detail::VectorRef>()[rc.id] = util::get<cv::detail::VectorRef>(arg); break;
case GShape::GOPAQUE: m_res.slot<cv::detail::OpaqueRef>()[rc.id] = util::get<cv::detail::OpaqueRef>(arg); break;
default: util::throw_error(std::logic_error("Unsupported input GShape type"));
magazine::bindInArg(m_res, rc, arg);
if (rc.shape == GShape::GMAT) {
auto& mat = m_res.slot<cv::Mat>()[rc.id];
// fluid::Buffer::bindTo() is not connected to magazine::bindIn/OutArg and unbind() calls,
// it's simply called each run() without any requirement to call some fluid-specific
// unbind() at the end of run()
m_buffers[m_id_map.at(rc.id)].priv().bindTo(mat, true);
}
}
void cv::gimpl::GFluidExecutable::bindOutArg(const cv::gimpl::RcDesc &rc, const GRunArgP &arg)
{
// Only GMat is supported as return type
using T = GRunArgP;
switch (rc.shape)
{
case GShape::GMAT:
{
cv::GMatDesc desc = m_buffers[m_id_map.at(rc.id)].meta();
auto &bref = m_buffers[m_id_map.at(rc.id)].priv();
switch (arg.index()) {
// FIXME: See the bindInArg comment on Streaming-related changes
case T::index_of<cv::Mat*>(): {
auto &outMat = *util::get<cv::Mat*>(arg);
GAPI_Assert(outMat.data != nullptr);
GAPI_Assert(cv::descr_of(outMat) == desc && "Output argument was not preallocated as it should be ?");
bref.bindTo(outMat, false);
} break;
default: GAPI_Assert(false);
} // switch(arg.index())
break;
}
default: util::throw_error(std::logic_error("Unsupported return GShape type"));
if (rc.shape != GShape::GMAT) {
util::throw_error(std::logic_error("Unsupported return GShape type"));
}
magazine::bindOutArg(m_res, rc, arg);
auto& mat = m_res.slot<cv::Mat>()[rc.id];
m_buffers[m_id_map.at(rc.id)].priv().bindTo(mat, false);
}
void cv::gimpl::GFluidExecutable::packArg(cv::GArg &in_arg, const cv::GArg &op_arg)
@ -1383,6 +1367,10 @@ void cv::gimpl::GFluidExecutable::run(std::vector<InObj> &input_objs,
agent->doWork();
}
}
// In/Out args clean-up is mandatory now with RMat
for (auto &it : input_objs) magazine::unbind(m_res, it.first);
for (auto &it : output_objs) magazine::unbind(m_res, it.first);
}
cv::gimpl::GParallelFluidExecutable::GParallelFluidExecutable(const ade::Graph &g,
@ -1452,7 +1440,7 @@ void GFluidBackendImpl::addMetaSensitiveBackendPasses(ade::ExecutionEngineSetupC
{
// Add FluidData to all data nodes inside island,
// set internal = true if node is not a slot in terms of higher-level GIslandModel
for (const auto node : isl->contents())
for (const auto& node : isl->contents())
{
if (g.metadata(node).get<NodeType>().t == NodeType::DATA &&
!fg.metadata(node).contains<FluidData>())

View File

@ -128,8 +128,7 @@ class GFluidExecutable final: public GIslandExecutable
std::vector<FluidAgent*> m_script;
using Magazine = detail::magazine<cv::Scalar, cv::detail::VectorRef, cv::detail::OpaqueRef>;
Magazine m_res;
cv::gimpl::Mag m_res;
std::size_t m_num_int_buffers; // internal buffers counter (m_buffers - num_scratch)
std::vector<std::size_t> m_scratch_users;

View File

@ -151,6 +151,348 @@ GAPI_FLUID_KERNEL(GFluidAddW, cv::gapi::core::GAddW, false)
enum Arithm { ARITHM_ABSDIFF, ARITHM_ADD, ARITHM_SUBTRACT, ARITHM_MULTIPLY, ARITHM_DIVIDE };
#if CV_SIMD
// SIMD stores of |a - b| dispatched on lane type. The short overload
// uses v_absdiffs (signed saturating absolute difference); the unsigned
// and float overloads use v_absdiff.
CV_ALWAYS_INLINE void absdiff_store(short out[], const v_int16& a, const v_int16& b, int x)
{
    vx_store(&out[x], v_absdiffs(a, b));
}

CV_ALWAYS_INLINE void absdiff_store(ushort out[], const v_uint16& a, const v_uint16& b, int x)
{
    vx_store(&out[x], v_absdiff(a, b));
}

CV_ALWAYS_INLINE void absdiff_store(uchar out[], const v_uint8& a, const v_uint8& b, int x)
{
    vx_store(&out[x], v_absdiff(a, b));
}

CV_ALWAYS_INLINE void absdiff_store(float out[], const v_float32& a, const v_float32& b, int x)
{
    vx_store(&out[x], v_absdiff(a, b));
}
// Vectorized |in1 - in2| over `length` elements of type T using lane
// type VT. Returns the number of elements processed (0 if the buffer is
// shorter than one full vector).
// Tail handling: when a partial vector remains and the operation is not
// in-place, the loop re-runs one last full vector shifted back to end
// exactly at `length` -- overlapping elements are simply rewritten with
// identical values.
template<typename T, typename VT>
CV_ALWAYS_INLINE int absdiff_impl(const T in1[], const T in2[], T out[], int length)
{
    constexpr int nlanes = static_cast<int>(VT::nlanes);

    if (length < nlanes)
        return 0;

    int x = 0;
    for (;;)
    {
        for (; x <= length - nlanes; x += nlanes)
        {
            VT a = vx_load(&in1[x]);
            VT b = vx_load(&in2[x]);
            absdiff_store(out, a, b, x);
        }

        // Only safe out-of-place: in-place would re-read already-written
        // output in the overlap.
        if (x < length && (in1 != out) && (in2 != out))
        {
            x = length - nlanes;
            continue; // process one more time (unaligned tail)
        }
        break;
    }

    return x;
}
// Element-type dispatcher for absdiff_impl. Each std::is_same test is a
// compile-time constant, so only one branch survives optimization; the
// reinterpret_casts are identity conversions required to make every
// branch compile for every instantiation of T.
// Returns the number of processed elements; 0 for unsupported types.
template<typename T>
CV_ALWAYS_INLINE int absdiff_simd(const T in1[], const T in2[], T out[], int length)
{
    if (std::is_same<T, uchar>::value)
    {
        return absdiff_impl<uchar, v_uint8>(reinterpret_cast<const uchar*>(in1),
                                            reinterpret_cast<const uchar*>(in2),
                                            reinterpret_cast<uchar*>(out), length);
    }
    else if (std::is_same<T, ushort>::value)
    {
        return absdiff_impl<ushort, v_uint16>(reinterpret_cast<const ushort*>(in1),
                                              reinterpret_cast<const ushort*>(in2),
                                              reinterpret_cast<ushort*>(out), length);
    }
    else if (std::is_same<T, short>::value)
    {
        return absdiff_impl<short, v_int16>(reinterpret_cast<const short*>(in1),
                                            reinterpret_cast<const short*>(in2),
                                            reinterpret_cast<short*>(out), length);
    }
    else if (std::is_same<T, float>::value)
    {
        return absdiff_impl<float, v_float32>(reinterpret_cast<const float*>(in1),
                                              reinterpret_cast<const float*>(in2),
                                              reinterpret_cast<float*>(out), length);
    }

    return 0;
}
// Same-element-type SIMD addition: out = in1 + in2 for element type T
// with lane type VT. Returns the number of elements processed (0 if the
// buffer is shorter than one vector). The unaligned tail is handled by
// re-running the last full vector, which is only done out-of-place.
template<typename T, typename VT>
CV_ALWAYS_INLINE int add_simd_sametype(const T in1[], const T in2[], T out[], int length)
{
    constexpr int nlanes = static_cast<int>(VT::nlanes);

    if (length < nlanes)
        return 0;

    int x = 0;
    for (;;)
    {
        for (; x <= length - nlanes; x += nlanes)
        {
            VT a = vx_load(&in1[x]);
            VT b = vx_load(&in2[x]);
            vx_store(&out[x], a + b);
        }

        if (x < length && (in1 != out) && (in2 != out))
        {
            x = length - nlanes;
            continue; // process one more time (unaligned tail)
        }
        break;
    }

    return x;
}
// SIMD addition out = in1 + in2 with optional narrowing conversion
// (saturating on the integer-output paths). Returns the number of
// elements processed; 0 means "no SIMD path for this SRC/DST pair" and
// the caller must fall back to scalar code for everything.
template<typename SRC, typename DST>
CV_ALWAYS_INLINE int add_simd(const SRC in1[], const SRC in2[], DST out[], int length)
{
    // Widening integer -> float addition is not vectorized here.
    if (std::is_same<DST, float>::value && !std::is_same<SRC, float>::value)
        return 0;

    if (std::is_same<DST, SRC>::value)
    {
        // Same-type paths (the reinterpret_casts are identity
        // conversions guarded by the is_same checks).
        if (std::is_same<DST, uchar>::value)
        {
            return add_simd_sametype<uchar, v_uint8>(reinterpret_cast<const uchar*>(in1),
                                                     reinterpret_cast<const uchar*>(in2),
                                                     reinterpret_cast<uchar*>(out), length);
        }
        else if (std::is_same<DST, short>::value)
        {
            return add_simd_sametype<short, v_int16>(reinterpret_cast<const short*>(in1),
                                                     reinterpret_cast<const short*>(in2),
                                                     reinterpret_cast<short*>(out), length);
        }
        else if (std::is_same<DST, float>::value)
        {
            return add_simd_sametype<float, v_float32>(reinterpret_cast<const float*>(in1),
                                                       reinterpret_cast<const float*>(in2),
                                                       reinterpret_cast<float*>(out), length);
        }
    }
    else if (std::is_same<SRC, short>::value && std::is_same<DST, uchar>::value)
    {
        // short + short -> uchar: add two int16 vectors, then pack with
        // unsigned saturation.
        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);

        if (length < nlanes)
            return 0;

        int x = 0;
        for (;;)
        {
            for (; x <= length - nlanes; x += nlanes)
            {
                v_int16 a1 = vx_load(reinterpret_cast<const short*>(&in1[x]));
                v_int16 a2 = vx_load(reinterpret_cast<const short*>(&in1[x + nlanes / 2]));
                v_int16 b1 = vx_load(reinterpret_cast<const short*>(&in2[x]));
                v_int16 b2 = vx_load(reinterpret_cast<const short*>(&in2[x + nlanes / 2]));

                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(a1 + b1, a2 + b2));
            }

            if (x < length)
            {
                // Tail re-processing is only valid out-of-place.
                CV_DbgAssert((reinterpret_cast<const short*>(in1) != reinterpret_cast<const short*>(out)) &&
                             (reinterpret_cast<const short*>(in2) != reinterpret_cast<const short*>(out)));
                x = length - nlanes;
                continue; // process one more time (unaligned tail)
            }
            break;
        }
        return x;
    }
    else if (std::is_same<SRC, float>::value && std::is_same<DST, uchar>::value)
    {
        // float + float -> uchar: add four float vectors, round to int32,
        // pack to int16, then pack to uchar with unsigned saturation.
        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);

        if (length < nlanes)
            return 0;

        int x = 0;
        for (;;)
        {
            for (; x <= length - nlanes; x += nlanes)
            {
                v_float32 a1 = vx_load(reinterpret_cast<const float*>(&in1[x]));
                v_float32 a2 = vx_load(reinterpret_cast<const float*>(&in1[x + nlanes / 4]));
                v_float32 a3 = vx_load(reinterpret_cast<const float*>(&in1[x + 2 * nlanes / 4]));
                v_float32 a4 = vx_load(reinterpret_cast<const float*>(&in1[x + 3 * nlanes / 4]));
                v_float32 b1 = vx_load(reinterpret_cast<const float*>(&in2[x]));
                v_float32 b2 = vx_load(reinterpret_cast<const float*>(&in2[x + nlanes / 4]));
                v_float32 b3 = vx_load(reinterpret_cast<const float*>(&in2[x + 2 * nlanes / 4]));
                v_float32 b4 = vx_load(reinterpret_cast<const float*>(&in2[x + 3 * nlanes / 4]));

                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(v_pack(v_round(a1 + b1), v_round(a2 + b2)),
                                                                     v_pack(v_round(a3 + b3), v_round(a4 + b4))));
            }

            if (x < length)
            {
                // Tail re-processing is only valid out-of-place.
                CV_DbgAssert((reinterpret_cast<const float*>(in1) != reinterpret_cast<const float*>(out)) &&
                             (reinterpret_cast<const float*>(in2) != reinterpret_cast<const float*>(out)));
                x = length - nlanes;
                continue; // process one more time (unaligned tail)
            }
            break;
        }
        return x;
    }

    return 0;
}
// Same-element-type SIMD subtraction: out = in1 - in2 for element type T
// with lane type VT. Returns the number of elements processed (0 if the
// buffer is shorter than one vector). The unaligned tail is handled by
// re-running the last full vector, which is only done out-of-place.
template<typename T, typename VT>
CV_ALWAYS_INLINE int sub_simd_sametype(const T in1[], const T in2[], T out[], int length)
{
    constexpr int nlanes = static_cast<int>(VT::nlanes);

    if (length < nlanes)
        return 0;

    int x = 0;
    for (;;)
    {
        for (; x <= length - nlanes; x += nlanes)
        {
            VT a = vx_load(&in1[x]);
            VT b = vx_load(&in2[x]);
            vx_store(&out[x], a - b);
        }

        if (x < length && (in1 != out) && (in2 != out))
        {
            x = length - nlanes;
            continue; // process one more time (unaligned tail)
        }
        break;
    }

    return x;
}
// SIMD subtraction out = in1 - in2 with optional narrowing conversion
// (saturating on the integer-output paths). Mirror of add_simd; returns
// the number of processed elements, 0 when there is no SIMD path for
// this SRC/DST pair.
template<typename SRC, typename DST>
CV_ALWAYS_INLINE int sub_simd(const SRC in1[], const SRC in2[], DST out[], int length)
{
    // Widening integer -> float subtraction is not vectorized here.
    if (std::is_same<DST, float>::value && !std::is_same<SRC, float>::value)
        return 0;

    if (std::is_same<DST, SRC>::value)
    {
        // Same-type paths (casts are identity conversions guarded by the
        // is_same checks).
        if (std::is_same<DST, uchar>::value)
        {
            return sub_simd_sametype<uchar, v_uint8>(reinterpret_cast<const uchar*>(in1),
                                                     reinterpret_cast<const uchar*>(in2),
                                                     reinterpret_cast<uchar*>(out), length);
        }
        else if (std::is_same<DST, short>::value)
        {
            return sub_simd_sametype<short, v_int16>(reinterpret_cast<const short*>(in1),
                                                     reinterpret_cast<const short*>(in2),
                                                     reinterpret_cast<short*>(out), length);
        }
        else if (std::is_same<DST, float>::value)
        {
            return sub_simd_sametype<float, v_float32>(reinterpret_cast<const float*>(in1),
                                                       reinterpret_cast<const float*>(in2),
                                                       reinterpret_cast<float*>(out), length);
        }
    }
    else if (std::is_same<SRC, short>::value && std::is_same<DST, uchar>::value)
    {
        // short - short -> uchar: subtract two int16 vectors, then pack
        // with unsigned saturation.
        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);

        if (length < nlanes)
            return 0;

        int x = 0;
        for (;;)
        {
            for (; x <= length - nlanes; x += nlanes)
            {
                v_int16 a1 = vx_load(reinterpret_cast<const short*>(&in1[x]));
                v_int16 a2 = vx_load(reinterpret_cast<const short*>(&in1[x + nlanes / 2]));
                v_int16 b1 = vx_load(reinterpret_cast<const short*>(&in2[x]));
                v_int16 b2 = vx_load(reinterpret_cast<const short*>(&in2[x + nlanes / 2]));

                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(a1 - b1, a2 - b2));
            }

            if (x < length)
            {
                // Tail re-processing is only valid out-of-place.
                CV_DbgAssert((reinterpret_cast<const short*>(in1) != reinterpret_cast<const short*>(out)) &&
                             (reinterpret_cast<const short*>(in2) != reinterpret_cast<const short*>(out)));
                x = length - nlanes;
                continue; // process one more time (unaligned tail)
            }
            break;
        }
        return x;
    }
    else if (std::is_same<SRC, float>::value && std::is_same<DST, uchar>::value)
    {
        // float - float -> uchar: subtract four float vectors, round to
        // int32, pack to int16, then to uchar with unsigned saturation.
        constexpr int nlanes = static_cast<int>(v_uint8::nlanes);

        if (length < nlanes)
            return 0;

        int x = 0;
        for (;;)
        {
            for (; x <= length - nlanes; x += nlanes)
            {
                v_float32 a1 = vx_load(reinterpret_cast<const float*>(&in1[x]));
                v_float32 a2 = vx_load(reinterpret_cast<const float*>(&in1[x + nlanes / 4]));
                v_float32 a3 = vx_load(reinterpret_cast<const float*>(&in1[x + 2 * nlanes / 4]));
                v_float32 a4 = vx_load(reinterpret_cast<const float*>(&in1[x + 3 * nlanes / 4]));
                v_float32 b1 = vx_load(reinterpret_cast<const float*>(&in2[x]));
                v_float32 b2 = vx_load(reinterpret_cast<const float*>(&in2[x + nlanes / 4]));
                v_float32 b3 = vx_load(reinterpret_cast<const float*>(&in2[x + 2 * nlanes / 4]));
                v_float32 b4 = vx_load(reinterpret_cast<const float*>(&in2[x + 3 * nlanes / 4]));

                vx_store(reinterpret_cast<uchar*>(&out[x]), v_pack_u(v_pack(v_round(a1 - b1), v_round(a2 - b2)),
                                                                     v_pack(v_round(a3 - b3), v_round(a4 - b4))));
            }

            if (x < length)
            {
                // Tail re-processing is only valid out-of-place.
                CV_DbgAssert((reinterpret_cast<const float*>(in1) != reinterpret_cast<const float*>(out)) &&
                             (reinterpret_cast<const float*>(in2) != reinterpret_cast<const float*>(out)));
                x = length - nlanes;
                continue; // process one more time (unaligned tail)
            }
            break;
        }
        return x;
    }

    return 0;
}
#endif
template<typename DST, typename SRC1, typename SRC2>
static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm arithm,
double scale=1)
@ -168,29 +510,37 @@ static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm a
// NB: assume in/out types are not 64-bits
float _scale = static_cast<float>( scale );
int x = 0;
switch (arithm)
{
case ARITHM_ABSDIFF:
for (int l=0; l < length; l++)
out[l] = absdiff<DST>(in1[l], in2[l]);
break;
case ARITHM_ADD:
for (int l=0; l < length; l++)
out[l] = add<DST>(in1[l], in2[l]);
break;
case ARITHM_SUBTRACT:
for (int l=0; l < length; l++)
out[l] = sub<DST>(in1[l], in2[l]);
break;
case ARITHM_MULTIPLY:
for (int l=0; l < length; l++)
out[l] = mul<DST>(in1[l], in2[l], _scale);
break;
case ARITHM_DIVIDE:
for (int l=0; l < length; l++)
out[l] = div<DST>(in1[l], in2[l], _scale);
break;
default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation");
case ARITHM_ADD:
{
#if CV_SIMD
x = add_simd(in1, in2, out, length);
#endif
for (; x < length; ++x)
out[x] = add<DST>(in1[x], in2[x]);
break;
}
case ARITHM_SUBTRACT:
{
#if CV_SIMD
x = sub_simd(in1, in2, out, length);
#endif
for (; x < length; ++x)
out[x] = sub<DST>(in1[x], in2[x]);
break;
}
case ARITHM_MULTIPLY:
for (; x < length; ++x)
out[x] = mul<DST>(in1[x], in2[x], _scale);
break;
case ARITHM_DIVIDE:
for (; x < length; ++x)
out[x] = div<DST>(in1[x], in2[x], _scale);
break;
default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation");
}
}
@ -270,6 +620,29 @@ GAPI_FLUID_KERNEL(GFluidDiv, cv::gapi::core::GDiv, false)
}
};
// Per-line absolute difference: out[i] = |a[i] - b[i]| for one fluid
// line. The SIMD helper processes the bulk of the row (when CV_SIMD is
// enabled); the scalar loop finishes whatever remains.
template<typename DST, typename SRC1, typename SRC2>
static void run_absdiff(Buffer &dst, const View &src1, const View &src2)
{
    static_assert(std::is_same<SRC1, SRC2>::value, "wrong types");
    static_assert(std::is_same<SRC1, DST>::value, "wrong types");

    const auto *lhs = src1.InLine<SRC1>(0);
    const auto *rhs = src2.InLine<SRC2>(0);
          auto *res = dst.OutLine<DST>();

    // Total scalar elements in the row: pixels times channels.
    const int total = dst.length() * dst.meta().chan;

    int idx = 0;
#if CV_SIMD
    idx = absdiff_simd(lhs, rhs, res, total);
#endif
    while (idx < total)
    {
        res[idx] = absdiff<DST>(lhs[idx], rhs[idx]);
        ++idx;
    }
}
GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false)
{
static const int Window = 1;
@ -277,10 +650,10 @@ GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false)
static void run(const View &src1, const View &src2, Buffer &dst)
{
// DST SRC1 SRC2 OP __VA_ARGS__
BINARY_(uchar , uchar , uchar , run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
BINARY_(ushort, ushort, ushort, run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
BINARY_( short, short, short, run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
BINARY_( float, float, float, run_arithm, dst, src1, src2, ARITHM_ABSDIFF);
BINARY_(uchar , uchar , uchar , run_absdiff, dst, src1, src2);
BINARY_(ushort, ushort, ushort, run_absdiff, dst, src1, src2);
BINARY_( short, short, short, run_absdiff, dst, src1, src2);
BINARY_( float, float, float, run_absdiff, dst, src1, src2);
CV_Error(cv::Error::StsBadArg, "unsupported combination of types");
}
@ -2302,40 +2675,6 @@ GAPI_FLUID_KERNEL(GFluidSqrt, cv::gapi::core::GSqrt, false)
}
};
// Raw copy kernel: replicates the input line into the output line.
// The element type is irrelevant -- the row is treated as a byte buffer
// of width * elem_size bytes, copied 16 bytes at a time when SIMD128 is
// available, with a scalar byte loop for the tail.
GAPI_FLUID_KERNEL(GFluidCopy, cv::gapi::core::GCopy, false)
{
    static const int Window = 1;

    static void run(const View &src, Buffer &dst)
    {
        const auto *in = src.InLine<uchar>(0);
        auto *out = dst.OutLine<uchar>();

        GAPI_DbgAssert(dst.length() == src.length());
        GAPI_DbgAssert(dst.meta().chan == src.meta().chan);
        GAPI_DbgAssert(dst.meta().depth == src.meta().depth);

        int width = src.length();
        // Bytes per pixel for this depth/channel combination.
        int elem_size = CV_ELEM_SIZE(CV_MAKETYPE(src.meta().depth, src.meta().chan));

        int w = 0; // cycle counter

    #if CV_SIMD128
        // Vector copy: 16 bytes per iteration.
        for (; w <= width*elem_size-16; w+=16)
        {
            v_uint8x16 a;
            a = v_load(&in[w]);
            v_store(&out[w], a);
        }
    #endif
        // Scalar tail (and full fallback when SIMD128 is disabled).
        for (; w < width*elem_size; w++)
        {
            out[w] = in[w];
        }
    }
};
} // namespace fliud
} // namespace gapi
} // namespace cv
@ -2395,7 +2734,6 @@ cv::gapi::GKernelPackage cv::gapi::core::fluid::kernels()
,GFluidInRange
,GFluidResize
,GFluidSqrt
,GFluidCopy
#if 0
,GFluidMean -- not fluid
,GFluidSum -- not fluid

View File

@ -0,0 +1,39 @@
#include <opencv2/gapi/infer/bindings_ie.hpp>
// Python-facing reduced wrapper over cv::gapi::ie::Params<cv::gapi::Generic>.

// Constructs parameters for a network given as separate model/weights
// files, to be run on the given device.
cv::gapi::ie::PyParams::PyParams(const std::string &tag,
                                 const std::string &model,
                                 const std::string &weights,
                                 const std::string &device)
    : m_priv(std::make_shared<Params<cv::gapi::Generic>>(tag, model, weights, device)) {
}

// Overload without a separate weights file; semantics follow the
// matching Params<cv::gapi::Generic> constructor (NOTE(review):
// presumably the pre-compiled/import flow -- confirm against ie::Params).
cv::gapi::ie::PyParams::PyParams(const std::string &tag,
                                 const std::string &model,
                                 const std::string &device)
    : m_priv(std::make_shared<Params<cv::gapi::Generic>>(tag, model, device)) {
}

// Thin forwarders to the underlying Params object.
cv::gapi::GBackend cv::gapi::ie::PyParams::backend() const {
    return m_priv->backend();
}

std::string cv::gapi::ie::PyParams::tag() const {
    return m_priv->tag();
}

cv::util::any cv::gapi::ie::PyParams::params() const {
    return m_priv->params();
}

// Free-function factories mirroring the two constructors above.
cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag,
                                            const std::string &model,
                                            const std::string &weights,
                                            const std::string &device) {
    return {tag, model, weights, device};
}

cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag,
                                            const std::string &model,
                                            const std::string &device) {
    return {tag, model, device};
}

View File

@ -36,6 +36,7 @@
#include <opencv2/gapi/gtype_traits.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/own/convert.hpp>
#include <opencv2/gapi/gframe.hpp>
#include "compiler/gobjref.hpp"
#include "compiler/gmodel.hpp"
@ -45,6 +46,10 @@
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
#if INF_ENGINE_RELEASE < 2021010000
#include "ie_compound_blob.h"
#endif
namespace IE = InferenceEngine;
namespace {
@ -151,6 +156,25 @@ inline IE::Blob::Ptr wrapIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) {
return IE::Blob::Ptr{};
}
// Wraps a MediaFrame view into an IE blob without copying pixel data.
// BGR frames are wrapped as a regular image blob; NV12 frames are
// converted via util::to_ie from the Y plane and the half-resolution
// interleaved UV plane (cv::Size division halves both dimensions).
// NOTE(review): the returned blob aliases the view's memory -- the
// caller must keep the view alive while the blob is in use.
inline IE::Blob::Ptr wrapIE(const cv::MediaFrame::View& view,
                            const cv::GFrameDesc& desc) {

    switch (desc.fmt) {
        case cv::MediaFormat::BGR: {
            auto bgr = cv::Mat(desc.size, CV_8UC3, view.ptr[0], view.stride[0]);
            return wrapIE(bgr, cv::gapi::ie::TraitAs::IMAGE);
        }
        case cv::MediaFormat::NV12: {
            auto y_plane = cv::Mat(desc.size, CV_8UC1, view.ptr[0], view.stride[0]);
            auto uv_plane = cv::Mat(desc.size / 2, CV_8UC2, view.ptr[1], view.stride[1]);
            return cv::gapi::ie::util::to_ie(y_plane, uv_plane);
        }
        default:
            GAPI_Assert(false && "Unsupported media format for IE backend");
    }
    // Unreachable: every switch path returns or asserts.
    GAPI_Assert(false);
}
template<class MatType>
inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
switch (blob->getTensorDesc().getPrecision()) {
@ -175,11 +199,27 @@ struct IEUnit {
IE::InputsDataMap inputs;
IE::OutputsDataMap outputs;
IE::ExecutableNetwork this_network;
cv::gimpl::ie::wrap::Plugin this_plugin;
explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp)
: params(pp) {
net = cv::gimpl::ie::wrap::readNetwork(params);
inputs = net.getInputsInfo();
outputs = net.getOutputsInfo();
if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
net = cv::gimpl::ie::wrap::readNetwork(params);
inputs = net.getInputsInfo();
outputs = net.getOutputsInfo();
} else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) {
this_plugin = cv::gimpl::ie::wrap::getPlugin(params);
this_plugin.SetConfig(params.config);
this_network = cv::gimpl::ie::wrap::importNetwork(this_plugin, params);
// FIXME: ICNNetwork returns InputsDataMap/OutputsDataMap,
// but ExecutableNetwork returns ConstInputsDataMap/ConstOutputsDataMap
inputs = cv::gimpl::ie::wrap::toInputsDataMap(this_network.GetInputsInfo());
outputs = cv::gimpl::ie::wrap::toOutputsDataMap(this_network.GetOutputsInfo());
} else {
cv::util::throw_error(std::logic_error("Unsupported ParamDesc::Kind"));
}
// The practice shows that not all inputs and not all outputs
// are mandatory to specify in IE model.
// So what we're concerned here about is:
@ -205,10 +245,16 @@ struct IEUnit {
// This method is [supposed to be] called at Island compilation stage
cv::gimpl::ie::IECompiled compile() const {
auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
auto this_request = this_network.CreateInferRequest();
IEUnit* non_const_this = const_cast<IEUnit*>(this);
if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
// FIXME: In case importNetwork for fill inputs/outputs need to obtain ExecutableNetwork, but
// for loadNetwork they can be obtained by using readNetwork
non_const_this->this_plugin = cv::gimpl::ie::wrap::getPlugin(params);
non_const_this->this_plugin.SetConfig(params.config);
non_const_this->this_network = cv::gimpl::ie::wrap::loadNetwork(non_const_this->this_plugin, net, params);
}
auto this_request = non_const_this->this_network.CreateInferRequest();
// Bind const data to infer request
for (auto &&p : params.const_inputs) {
// FIXME: SetBlob is known to be inefficient,
@ -217,7 +263,16 @@ struct IEUnit {
// Still, constant data is to set only once.
this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second));
}
return {plugin, this_network, this_request};
// Bind const data to infer request
for (auto &&p : params.const_inputs) {
// FIXME: SetBlob is known to be inefficient,
// it is worth to make a customizable "initializer" and pass the
// cv::Mat-wrapped blob there to support IE's optimal "GetBlob idiom"
// Still, constant data is to set only once.
this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second));
}
return {this_plugin, this_network, this_request};
}
};
@ -225,6 +280,7 @@ struct IECallContext
{
// Input parameters passed to an inference operation.
std::vector<cv::GArg> args;
cv::GShapes in_shapes;
//FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call
//to OCV kernel. (This can be achieved by a two single time conversions in GCPUExecutable::run,
@ -236,6 +292,10 @@ struct IECallContext
template<typename T>
const T& inArg(std::size_t input) { return args.at(input).get<T>(); }
const cv::MediaFrame& inFrame(std::size_t input) {
return inArg<cv::MediaFrame>(input);
}
// Syntax sugar
const cv::Mat& inMat(std::size_t input) {
return inArg<cv::Mat>(input);
@ -288,6 +348,24 @@ using GConstGIEModel = ade::ConstTypedGraph
, IEUnit
, IECallable
>;
// Keeps MediaFrame views alive for the duration of an inference request:
// the IE blobs produced below alias the views' memory.
using Views = std::vector<std::unique_ptr<cv::MediaFrame::View>>;

// Extracts an IE blob from the i-th input of the call context.
// GFRAME inputs are accessed for reading and the acquired view is
// retained in `views` so the blob stays valid; GMAT inputs are wrapped
// directly as image blobs. Any other shape is a contract violation.
inline IE::Blob::Ptr extractBlob(IECallContext& ctx, std::size_t i, Views& views) {
    switch (ctx.in_shapes[i]) {
        case cv::GShape::GFRAME: {
            const auto& frame = ctx.inFrame(i);
            views.emplace_back(new cv::MediaFrame::View(frame.access(cv::MediaFrame::Access::R)));
            return wrapIE(*views.back(), frame.desc());
        }
        case cv::GShape::GMAT: {
            return wrapIE(ctx.inMat(i), cv::gapi::ie::TraitAs::IMAGE);
        }
        default:
            // FIX: assert on `false && "msg"`, not on the bare string
            // literal -- a non-null string pointer is always truthy, so
            // the original diagnostic could never fire.
            GAPI_Assert(false && "Unsupported input shape for IE backend");
    }
    GAPI_Assert(false);
}
} // anonymous namespace
// GCPUExcecutable implementation //////////////////////////////////////////////
@ -353,6 +431,8 @@ cv::GArg cv::gimpl::ie::GIEExecutable::packArg(const cv::GArg &arg) {
// (and constructed by either bindIn/Out or resetInternal)
case GShape::GOPAQUE: return GArg(m_res.slot<cv::detail::OpaqueRef>().at(ref.id));
case GShape::GFRAME: return GArg(m_res.slot<cv::MediaFrame>().at(ref.id));
default:
util::throw_error(std::logic_error("Unsupported GShape type"));
break;
@ -382,6 +462,12 @@ void cv::gimpl::ie::GIEExecutable::run(std::vector<InObj> &&input_objs,
std::back_inserter(context.args),
std::bind(&GIEExecutable::packArg, this, _1));
// NB: Need to store inputs shape to recognize GFrame/GMat
ade::util::transform(op.args,
std::back_inserter(context.in_shapes),
[](const cv::GArg& arg) {
return arg.get<cv::gimpl::RcDesc>().shape;
});
// - Output parameters.
for (const auto &out_it : ade::util::indexed(op.outs)) {
// FIXME: Can the same GArg type resolution mechanism be reused here?
@ -397,12 +483,44 @@ void cv::gimpl::ie::GIEExecutable::run(std::vector<InObj> &&input_objs,
kk.run(this_iec, uu, context);
for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
// In/Out args clean-up is mandatory now with RMat
for (auto &it : input_objs) magazine::unbind(m_res, it.first);
for (auto &it : output_objs) magazine::unbind(m_res, it.first);
}
namespace cv {
namespace gimpl {
namespace ie {
// Configure an IE network input from the G-API meta of the argument feeding it:
// - GMatDesc:   set the input precision from the Mat depth;
// - GFrameDesc: set the precision to U8 and, for NV12 frames, enable IE's
//               built-in NV12 color conversion in pre-processing.
// Throws std::runtime_error for any other meta kind.
// FIX: `mm` is taken by const reference — the original passed the GMetaArg
// variant by value, copying it on every call for no reason.
static void configureInputInfo(const IE::InputInfo::Ptr& ii, const cv::GMetaArg &mm) {
    switch (mm.index()) {
    case cv::GMetaArg::index_of<cv::GMatDesc>():
    {
        ii->setPrecision(toIE(util::get<cv::GMatDesc>(mm).depth));
        break;
    }
    case cv::GMetaArg::index_of<cv::GFrameDesc>():
    {
        const auto &meta = util::get<cv::GFrameDesc>(mm);
        switch (meta.fmt) {
        case cv::MediaFormat::NV12:
            // Let IE convert NV12 into the network's expected format.
            ii->getPreProcess().setColorFormat(IE::ColorFormat::NV12);
            break;
        case cv::MediaFormat::BGR:
            // NB: Do nothing — BGR is consumed as-is.
            break;
        default:
            GAPI_Assert(false && "Unsupported media format for IE backend");
        }
        ii->setPrecision(toIE(CV_8U));
        break;
    }
    default:
        util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
    }
}
struct Infer: public cv::detail::KernelTag {
using API = cv::GInferBase;
static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
@ -433,11 +551,7 @@ struct Infer: public cv::detail::KernelTag {
auto &&ii = uu.inputs.at(std::get<0>(it));
const auto & mm = std::get<1>(it);
GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
&& "Non-GMat inputs are not supported");
const auto &meta = util::get<cv::GMatDesc>(mm);
ii->setPrecision(toIE(meta.depth));
configureInputInfo(ii, mm);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
}
@ -460,15 +574,12 @@ struct Infer: public cv::detail::KernelTag {
static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
// non-generic version for now:
// - assumes all inputs/outputs are always Mats
Views views;
for (auto i : ade::util::iota(uu.params.num_in)) {
// TODO: Ideally we shouldn't do SetBlob() but GetBlob() instead,
// and redirect our data producers to this memory
// (A memory dialog comes to the picture again)
const cv::Mat this_mat = ctx.inMat(i);
// FIXME: By default here we trait our inputs as images.
// May be we need to make some more intelligence here about it
IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
IE::Blob::Ptr this_blob = extractBlob(ctx, i, views);
iec.this_request.SetBlob(uu.params.input_names[i], this_blob);
}
iec.this_request.Infer();
@ -486,6 +597,67 @@ struct Infer: public cv::detail::KernelTag {
}
};
// IE backend kernel for GInferROI: runs inference on a single ROI of the
// input image/frame, using IE's ROI blob support (no data copy).
struct InferROI: public cv::detail::KernelTag {
    using API = cv::GInferROIBase;
    static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
    static KImpl kernel()               { return KImpl{outMeta, run}; }

    // Derive output metas from the network's output descriptors and
    // configure the (single) network input from the image meta.
    static cv::GMetaArgs outMeta(const ade::Graph &gr,
                                 const ade::NodeHandle &nh,
                                 const cv::GMetaArgs &in_metas,
                                 const cv::GArgs &/*in_args*/) {
        cv::GMetaArgs result;

        GConstGIEModel gm(gr);
        const auto &uu = gm.metadata(nh).get<IEUnit>();

        // Initialize input information
        // FIXME: So far it is pretty limited
        GAPI_Assert(1u == uu.params.input_names.size());
        GAPI_Assert(2u == in_metas.size());

        // 0th is ROI, 1st is input image
        auto &&ii = uu.inputs.at(uu.params.input_names.at(0));
        auto &&mm = in_metas.at(1u);
        configureInputInfo(ii, mm);
        ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);

        // FIXME: It would be nice here to have an exact number of network's
        // input/output parameters. Probably GCall should store it here for us.
        // It doesn't, as far as I know..
        for (const auto &out_name : uu.params.output_names) {
            // NOTE: our output_names vector follows the API order
            // of this operation's outputs
            const IE::DataPtr& ie_out = uu.outputs.at(out_name);
            // FIX: reuse `dims` — the original computed it and then called
            // getTensorDesc().getDims() a second time, leaving `dims` unused.
            const IE::SizeVector dims = ie_out->getTensorDesc().getDims();
            cv::GMatDesc outm(toCV(ie_out->getPrecision()), toCV(dims));
            result.emplace_back(outm);
        }
        return result;
    }

    // Execute: wrap the input as a blob, restrict it to the ROI, infer,
    // then copy every network output back into the corresponding out Mat.
    static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
        // non-generic version for now, per the InferROI's definition
        GAPI_Assert(uu.params.num_in == 1);
        const auto& this_roi = ctx.inArg<cv::detail::OpaqueRef>(0).rref<cv::Rect>();
        Views views;
        IE::Blob::Ptr this_blob = extractBlob(ctx, 1, views);
        iec.this_request.SetBlob(*uu.params.input_names.begin(),
                                 IE::make_shared_blob(this_blob, toIE(this_roi)));
        iec.this_request.Infer();
        for (auto i : ade::util::iota(uu.params.num_out)) {
            cv::Mat& out_mat = ctx.outMatR(i);
            IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]);
            copyFromIE(out_blob, out_mat);
        }
    }
};
struct InferList: public cv::detail::KernelTag {
using API = cv::GInferListBase;
static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
@ -512,12 +684,7 @@ struct InferList: public cv::detail::KernelTag {
for (auto &&input_name : uu.params.input_names) {
auto &&ii = uu.inputs.at(input_name);
const auto & mm = in_metas[idx++];
GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
&& "Non-GMat inputs are not supported");
const auto &meta = util::get<cv::GMatDesc>(mm);
ii->setPrecision(toIE(meta.depth));
configureInputInfo(ii, mm);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
}
@ -536,9 +703,9 @@ struct InferList: public cv::detail::KernelTag {
GAPI_Assert(uu.params.num_in == 1); // roi list is not counted in net's inputs
const auto& in_roi_vec = ctx.inArg<cv::detail::VectorRef>(0u).rref<cv::Rect>();
const cv::Mat this_mat = ctx.inMat(1u);
// Since we do a ROI list inference, always assume our input buffer is image
IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
Views views;
IE::Blob::Ptr this_blob = extractBlob(ctx, 1, views);
// FIXME: This could be done ONCE at graph compile stage!
std::vector< std::vector<int> > cached_dims(uu.params.num_out);
@ -602,11 +769,30 @@ struct InferList2: public cv::detail::KernelTag {
// "blob"-based ones)
// FIXME: this is filtering not done, actually! GArrayDesc has
// no hint for its underlying type!
const auto &mm_0 = in_metas[0u];
const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
GAPI_Assert( !meta_0.isND()
const auto &mm_0 = in_metas[0u];
switch (in_metas[0u].index()) {
case cv::GMetaArg::index_of<cv::GMatDesc>(): {
const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
GAPI_Assert( !meta_0.isND()
&& !meta_0.planar
&& "Only images are supported as the 0th argument");
break;
}
case cv::GMetaArg::index_of<cv::GFrameDesc>(): {
// FIXME: Is there any validation for GFrame ?
break;
}
default:
util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
}
if (util::holds_alternative<cv::GMatDesc>(mm_0)) {
const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
GAPI_Assert( !meta_0.isND()
&& !meta_0.planar
&& "Only images are supported as the 0th argument");
}
std::size_t idx = 1u;
for (auto &&input_name : uu.params.input_names) {
auto &ii = uu.inputs.at(input_name);
@ -616,7 +802,7 @@ struct InferList2: public cv::detail::KernelTag {
if (op.k.inKinds[idx] == cv::detail::OpaqueKind::CV_RECT) {
// This is a cv::Rect -- configure the IE preprocessing
ii->setPrecision(toIE(meta_0.depth));
configureInputInfo(ii, mm_0);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
} else {
// This is a cv::GMat (equals to: cv::Mat)
@ -639,9 +825,8 @@ struct InferList2: public cv::detail::KernelTag {
GAPI_Assert(ctx.args.size() > 1u
&& "This operation must have at least two arguments");
// Since we do a ROI list inference, always assume our input buffer is image
const cv::Mat mat_0 = ctx.inMat(0u);
IE::Blob::Ptr blob_0 = wrapIE(mat_0, cv::gapi::ie::TraitAs::IMAGE);
Views views;
IE::Blob::Ptr blob_0 = extractBlob(ctx, 0, views);
// Take the next argument, which must be vector (of any kind).
// Use it only to obtain the ROI list size (sizes of all other
@ -717,9 +902,23 @@ namespace {
// FIXME: Introduce a DNNBackend interface which'd specify
// the framework for this???
GIEModel gm(gr);
const auto &np = gm.metadata(nh).get<NetworkParams>();
const auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
auto &np = gm.metadata(nh).get<NetworkParams>();
auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
const auto &ki = cv::util::any_cast<KImpl>(ii.opaque);
GModel::Graph model(gr);
auto& op = model.metadata(nh).get<Op>();
// NB: In case generic infer, info about in/out names is stored in operation (op.params)
if (pp.is_generic)
{
auto& info = cv::util::any_cast<cv::InOutInfo>(op.params);
pp.input_names = info.in_names;
pp.output_names = info.out_names;
pp.num_in = info.in_names.size();
pp.num_out = info.out_names.size();
}
gm.metadata(nh).set(IEUnit{pp});
gm.metadata(nh).set(IECallable{ki.run});
gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc});
@ -733,6 +932,7 @@ namespace {
virtual cv::gapi::GKernelPackage auxiliaryKernels() const override {
return cv::gapi::kernels< cv::gimpl::ie::Infer
, cv::gimpl::ie::InferROI
, cv::gimpl::ie::InferList
, cv::gimpl::ie::InferList2
>();
@ -760,6 +960,16 @@ IE::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &blob) {
return wrapIE(blob, cv::gapi::ie::TraitAs::IMAGE);
}
// Wrap a pair of NV12 planes (Y + interleaved UV) into a single IE NV12 blob.
// The blob references the Mats' data directly — the planes must outlive it.
IE::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &y_plane, cv::Mat &uv_plane) {
    auto y_blob  = wrapIE(y_plane,  cv::gapi::ie::TraitAs::IMAGE);
    auto uv_blob = wrapIE(uv_plane, cv::gapi::ie::TraitAs::IMAGE);
    // FIX: the former `#if INF_ENGINE_RELEASE >= 2021010000` had two branches
    // that were textually equivalent (`IE` aliases `InferenceEngine`), so the
    // preprocessor conditional was dead redundancy and is dropped.
    return IE::make_shared_blob<IE::NV12Blob>(y_blob, uv_blob);
}
#else // HAVE_INF_ENGINE
cv::gapi::GBackend cv::gapi::ie::backend() {

Some files were not shown because too many files have changed in this diff Show More