diff --git a/inference-engine/thirdparty/fluid/checksum.txt b/inference-engine/thirdparty/fluid/checksum.txt index 9cc3376ca53..ec287302175 100644 --- a/inference-engine/thirdparty/fluid/checksum.txt +++ b/inference-engine/thirdparty/fluid/checksum.txt @@ -1 +1 @@ -d8947b3280c8644f9828fac2b36f5f5a +4cf15d9809d418afab0679189cd08d12 diff --git a/inference-engine/thirdparty/fluid/modules/gapi/CMakeLists.txt b/inference-engine/thirdparty/fluid/modules/gapi/CMakeLists.txt index 76d45777747..2ad7be9ad2f 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/CMakeLists.txt +++ b/inference-engine/thirdparty/fluid/modules/gapi/CMakeLists.txt @@ -38,6 +38,10 @@ if(MSVC) endif() endif() +if(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") # don't add Clang here: issue should be investigated and fixed (workaround for Apple only) + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wrange-loop-analysis) # https://github.com/opencv/opencv/issues/18928 +endif() + file(GLOB gapi_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.hpp" @@ -49,6 +53,7 @@ file(GLOB gapi_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/ocl/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/own/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/render/*.hpp" + "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/s11n/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp" @@ -56,6 +61,7 @@ file(GLOB gapi_ext_hdrs set(gapi_srcs # Front-end part + src/api/grunarg.cpp src/api/gorigin.cpp src/api/gmat.cpp src/api/garray.cpp @@ -73,10 +79,12 @@ set(gapi_srcs src/api/kernels_imgproc.cpp src/api/kernels_video.cpp src/api/kernels_nnparsers.cpp + src/api/kernels_streaming.cpp src/api/render.cpp src/api/render_ocv.cpp src/api/ginfer.cpp - src/api/ft_render.cpp + src/api/media.cpp + 
src/api/rmat.cpp # Compiler part src/compiler/gmodel.cpp @@ -95,9 +103,11 @@ set(gapi_srcs src/compiler/passes/pattern_matching.cpp src/compiler/passes/perform_substitution.cpp src/compiler/passes/streaming.cpp + src/compiler/passes/intrin.cpp # Executor src/executor/gexecutor.cpp + src/executor/gtbbexecutor.cpp src/executor/gstreamingexecutor.cpp src/executor/gasync.cpp @@ -127,21 +137,31 @@ set(gapi_srcs src/backends/ie/giebackend.cpp src/backends/ie/giebackend/giewrapper.cpp - # Render Backend. - src/backends/render/grenderocvbackend.cpp - src/backends/render/grenderocv.cpp + # ONNX backend + src/backends/onnx/gonnxbackend.cpp - #PlaidML Backend + # Render backend + src/backends/render/grenderocv.cpp + src/backends/render/ft_render.cpp + + # PlaidML Backend src/backends/plaidml/gplaidmlcore.cpp src/backends/plaidml/gplaidmlbackend.cpp - # Compound + # Common backend code + src/backends/common/gmetabackend.cpp src/backends/common/gcompoundbackend.cpp src/backends/common/gcompoundkernel.cpp # Serialization API and routines src/api/s11n.cpp src/backends/common/serialization.cpp + + # Streaming backend + src/backends/streaming/gstreamingbackend.cpp + + # Python bridge + src/backends/ie/bindings_ie.cpp ) ocv_add_dispatched_file(backends/fluid/gfluidimgproc_func SSE4_1 AVX2) @@ -180,6 +200,10 @@ if(TARGET opencv_test_gapi) target_link_libraries(opencv_test_gapi PRIVATE ade) endif() +if(HAVE_TBB AND TARGET opencv_test_gapi) + ocv_target_link_libraries(opencv_test_gapi PRIVATE tbb) +endif() + if(HAVE_FREETYPE) ocv_target_compile_definitions(${the_module} PRIVATE -DHAVE_FREETYPE) if(TARGET opencv_test_gapi) @@ -198,10 +222,20 @@ if(HAVE_PLAIDML) ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${PLAIDML_INCLUDE_DIRS}) endif() + if(WIN32) # Required for htonl/ntohl on Windows ocv_target_link_libraries(${the_module} PRIVATE wsock32 ws2_32) endif() +if(HAVE_ONNX) + ocv_target_link_libraries(${the_module} PRIVATE ${ONNX_LIBRARY}) + 
ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX=1) + if(TARGET opencv_test_gapi) + ocv_target_compile_definitions(opencv_test_gapi PRIVATE HAVE_ONNX=1) + ocv_target_link_libraries(opencv_test_gapi PRIVATE ${ONNX_LIBRARY}) + endif() +endif() + ocv_add_perf_tests() ocv_add_samples() diff --git a/inference-engine/thirdparty/fluid/modules/gapi/cmake/standalone.cmake b/inference-engine/thirdparty/fluid/modules/gapi/cmake/standalone.cmake index ca546975241..5cc57d82694 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/cmake/standalone.cmake +++ b/inference-engine/thirdparty/fluid/modules/gapi/cmake/standalone.cmake @@ -15,6 +15,8 @@ file(GLOB FLUID_includes "${FLUID_ROOT}/include/opencv2/*.hpp" "${FLUID_ROOT}/include/opencv2/gapi/own/*.hpp" "${FLUID_ROOT}/include/opencv2/gapi/fluid/*.hpp") file(GLOB FLUID_sources "${FLUID_ROOT}/src/api/g*.cpp" + "${FLUID_ROOT}/src/api/rmat.cpp" + "${FLUID_ROOT}/src/api/media.cpp" "${FLUID_ROOT}/src/compiler/*.cpp" "${FLUID_ROOT}/src/compiler/passes/*.cpp" "${FLUID_ROOT}/src/executor/*.cpp" diff --git a/inference-engine/thirdparty/fluid/modules/gapi/doc/slides/gapi_overview.org b/inference-engine/thirdparty/fluid/modules/gapi/doc/slides/gapi_overview.org index f2bd39fa123..676c914b664 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/doc/slides/gapi_overview.org +++ b/inference-engine/thirdparty/fluid/modules/gapi/doc/slides/gapi_overview.org @@ -1,10 +1,10 @@ -#+TITLE: OpenCV 4.0 Graph API +#+TITLE: OpenCV 4.4 Graph API #+AUTHOR: Dmitry Matveev\newline Intel Corporation #+OPTIONS: H:2 toc:t num:t #+LATEX_CLASS: beamer #+LATEX_CLASS_OPTIONS: [presentation] #+LATEX_HEADER: \usepackage{transparent} \usepackage{listings} \usepackage{pgfplots} \usepackage{mtheme.sty/beamerthememetropolis} -#+LATEX_HEADER: \setbeamertemplate{frame footer}{OpenCV 4.0 G-API: Overview and programming by example} +#+LATEX_HEADER: \setbeamertemplate{frame footer}{OpenCV 4.4 G-API: Overview and programming by example} 
#+BEAMER_HEADER: \subtitle{Overview and programming by example} #+BEAMER_HEADER: \titlegraphic{ \vspace*{3cm}\hspace*{5cm} {\transparent{0.2}\includegraphics[height=\textheight]{ocv_logo.eps}}} #+COLUMNS: %45ITEM %10BEAMER_ENV(Env) %10BEAMER_ACT(Act) %4BEAMER_COL(Col) %8BEAMER_OPT(Opt) @@ -21,7 +21,7 @@ - OpenCV meets C++, ~cv::Mat~ replaces ~IplImage*~; -*** Version 3.0: -- Welcome Transparent API (T-API) +*** Version 3.0 -- Welcome Transparent API (T-API) - ~cv::UMat~ is introduced as a /transparent/ addition to ~cv::Mat~; @@ -32,7 +32,7 @@ ** OpenCV evolution in one slide (cont'd) # FIXME: Learn proper page-breaking! -*** Version 4.0: -- Welcome Graph API (G-API) +*** Version 4.0 -- Welcome Graph API (G-API) - A new separate module (not a full library rewrite); - A framework (or even a /meta/-framework); @@ -45,6 +45,24 @@ - Kernels can be written in unconstrained platform-native code; - Halide can serve as a backend (one of many). +** OpenCV evolution in one slide (cont'd) +# FIXME: Learn proper page-breaking! + +*** Version 4.2 -- New horizons + +- Introduced in-graph inference via OpenVINO™ Toolkit; +- Introduced video-oriented Streaming execution mode; +- Extended focus from individual image processing to the full + application pipeline optimization. + +*** Version 4.4 -- More on video + +- Introduced a notion of stateful kernels; + - The road to object tracking, background subtraction, etc. in the + graph; +- Added more video-oriented operations (feature detection, Optical + flow). + ** Why G-API? *** Why introduce a new execution model? @@ -80,7 +98,7 @@ - *Heterogeneity* gets extra benefits like: - Avoiding unnecessary data transfers; - Shadowing transfer costs with parallel host co-execution; - - Increasing system throughput with frame-level pipelining. + - Improving system throughput with frame-level pipelining. * Programming with G-API @@ -96,7 +114,34 @@ - What data objects are /inputs/ to the graph? - What are its /outputs/? 
-** A code is worth a thousand words +** The code is worth a thousand words + :PROPERTIES: + :BEAMER_opt: shrink=42 + :END: + +#+BEGIN_SRC C++ +#include // G-API framework header +#include // cv::gapi::blur() +#include // cv::imread/imwrite + +int main(int argc, char *argv[]) { + if (argc < 3) return 1; + + cv::GMat in; // Express the graph: + cv::GMat out = cv::gapi::blur(in, cv::Size(3,3)); // `out` is a result of `blur` of `in` + + cv::Mat in_mat = cv::imread(argv[1]); // Get the real data + cv::Mat out_mat; // Output buffer (may be empty) + + cv::GComputation(cv::GIn(in), cv::GOut(out)) // Declare a graph from `in` to `out` + .apply(cv::gin(in_mat), cv::gout(out_mat)); // ...and run it immediately + + cv::imwrite(argv[2], out_mat); // Save the result + return 0; +} +#+END_SRC + +** The code is worth a thousand words :PROPERTIES: :BEAMER_opt: shrink=42 :END: @@ -161,7 +206,7 @@ int main(int argc, char *argv[]) { } #+END_SRC -** A code is worth a thousand words (cont'd) +** The code is worth a thousand words (cont'd) # FIXME: sections!!! *** What we have just learned? 
@@ -183,59 +228,82 @@ cv::GComputation(cv::GIn(...), cv::GOut(...)) ** On data objects Graph *protocol* defines what arguments a computation was defined on - (both inputs and outputs), and what are the *shapes* (or types) of - those arguments: +(both inputs and outputs), and what are the *shapes* (or types) of +those arguments: - | *Shape* | *Argument* | Size | - |-------------+------------------+-----------------------------| - | ~GMat~ | ~Mat~ | Static; defined during | - | | | graph compilation | - |-------------+------------------+-----------------------------| - | ~GScalar~ | ~Scalar~ | 4 x ~double~ | - |-------------+------------------+-----------------------------| - | ~GArray~ | ~std::vector~ | Dynamic; defined in runtime | + | *Shape* | *Argument* | Size | + |--------------+------------------+-----------------------------| + | ~GMat~ | ~Mat~ | Static; defined during | + | | | graph compilation | + |--------------+------------------+-----------------------------| + | ~GScalar~ | ~Scalar~ | 4 x ~double~ | + |--------------+------------------+-----------------------------| + | ~GArray~ | ~std::vector~ | Dynamic; defined in runtime | + |--------------+------------------+-----------------------------| + | ~GOpaque~ | ~T~ | Static, ~sizeof(T)~ | ~GScalar~ may be value-initialized at construction time to allow expressions like ~GMat a = 2*(b + 1)~. -** Customization example +** On operations and kernels + :PROPERTIES: + :BEAMER_opt: shrink=22 + :END: -*** Tuning the execution +*** :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.45 + :END: -- Graph execution model is defined by kernels which are used; -- Kernels can be specified in graph compilation arguments: - #+LaTeX: {\footnotesize - #+BEGIN_SRC C++ - #include - #include - ... 
- auto pkg = gapi::combine(gapi::core::fluid::kernels(), - gapi::imgproc::fluid::kernels(), - cv::unite_policy::KEEP); - sobel.apply(in_mat, out_mat, compile_args(pkg)); - #+END_SRC - #+LaTeX: } -- OpenCL backend can be used in the same way; - #+LaTeX: {\footnotesize -- *NOTE*: ~cv::unite_policy~ has been removed in OpenCV 4.1.1. - #+LaTeX: } +- Graphs are built with *Operations* over virtual *Data*; +- *Operations* define interfaces (literally); +- *Kernels* are implementations to *Operations* (like in OOP); +- An *Operation* is platform-agnostic, a *kernel* is not; +- *Kernels* are implemented for *Backends*, the latter provide + APIs to write kernels; +- Users can /add/ their *own* operations and kernels, + and also /redefine/ "standard" kernels their *own* way. -** Operations and Kernels +*** :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.45 + :END: -*** Specifying a kernel package +#+BEGIN_SRC dot :file "000-ops-kernels.eps" :cmdline "-Kdot -Teps" +digraph G { +node [shape=box]; +rankdir=BT; -- A *kernel* is an implementation of *operation* (= interface); -- A *kernel package* hosts kernels that G-API should use; -- Kernels are written for different *backends* and using their APIs; -- Two kernel packages can be *merged* into a single one; -- User can safely supply his *own kernels* to either /replace/ or - /augment/ the default package. - - Yes, even the standard kernels can be /overwritten/ by user from - the outside! -- *Heterogeneous* kernel package hosts kernels of different backends. +Gr [label="Graph"]; +Op [label="Operation\nA"]; +{rank=same +Impl1 [label="Kernel\nA:2"]; +Impl2 [label="Kernel\nA:1"]; +} -** Operations and Kernels (cont'd) -# FIXME!!! 
+Op -> Gr [dir=back, label="'consists of'"]; +Impl1 -> Op []; +Impl2 -> Op [label="'is implemented by'"]; + +node [shape=note,style=dashed]; +{rank=same +Op; +CommentOp [label="Abstract:\ndeclared via\nG_API_OP()"]; +} +{rank=same +Comment1 [label="Platform:\ndefined with\nOpenCL backend"]; +Comment2 [label="Platform:\ndefined with\nOpenCV backend"]; +} + +CommentOp -> Op [constraint=false, style=dashed, arrowhead=none]; +Comment1 -> Impl1 [style=dashed, arrowhead=none]; +Comment2 -> Impl2 [style=dashed, arrowhead=none]; +} +#+END_SRC + +** On operations and kernels (cont'd) *** Defining an operation @@ -245,16 +313,43 @@ Graph *protocol* defines what arguments a computation was defined on - Metadata callback -- describe what is the output value format(s), given the input and arguments. - Use ~OpType::on(...)~ to use a new kernel ~OpType~ to construct graphs. + #+LaTeX: {\footnotesize #+BEGIN_SRC C++ -G_TYPED_KERNEL(GSqrt,,"org.opencv.core.math.sqrt") { +G_API_OP(GSqrt,,"org.opencv.core.math.sqrt") { static GMatDesc outMeta(GMatDesc in) { return in; } }; #+END_SRC #+LaTeX: } -** Operations and Kernels (cont'd) -# FIXME!!! +** On operations and kernels (cont'd) + +*** ~GSqrt~ vs. ~cv::gapi::sqrt()~ + +- How a *type* relates to a *functions* from the example? 
+- These functions are just wrappers over ~::on~: + #+LaTeX: {\scriptsize + #+BEGIN_SRC C++ + G_API_OP(GSqrt,,"org.opencv.core.math.sqrt") { + static GMatDesc outMeta(GMatDesc in) { return in; } + }; + GMat gapi::sqrt(const GMat& src) { return GSqrt::on(src); } + #+END_SRC + #+LaTeX: } +- Why -- Doxygen, default parameters, 1:n mapping: + #+LaTeX: {\scriptsize + #+BEGIN_SRC C++ + cv::GMat custom::unsharpMask(const cv::GMat &src, + const int sigma, + const float strength) { + cv::GMat blurred = cv::gapi::medianBlur(src, sigma); + cv::GMat laplacian = cv::gapi::Laplacian(blurred, CV_8U); + return (src - (laplacian * strength)); + } + #+END_SRC + #+LaTeX: } + +** On operations and kernels (cont'd) *** Implementing an operation @@ -297,6 +392,467 @@ G_TYPED_KERNEL(GSqrt,,"org.opencv.core.math.sqrt") { - Note ~run~ changes signature but still is derived from the operation signature. +** Operations and Kernels (cont'd) + +*** Specifying which kernels to use + +- Graph execution model is defined by kernels which are available/used; +- Kernels can be specified via the graph compilation arguments: + #+LaTeX: {\footnotesize + #+BEGIN_SRC C++ + #include + #include + ... + auto pkg = cv::gapi::combine(cv::gapi::core::fluid::kernels(), + cv::gapi::imgproc::fluid::kernels()); + sobel.apply(in_mat, out_mat, cv::compile_args(pkg)); + #+END_SRC + #+LaTeX: } +- Users can combine kernels of different backends and G-API will partition + the execution among those automatically. 
+ +** Heterogeneity in G-API + :PROPERTIES: + :BEAMER_opt: shrink=35 + :END: +*** Automatic subgraph partitioning in G-API +*** :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.18 + :END: + +#+BEGIN_SRC dot :file "010-hetero-init.eps" :cmdline "-Kdot -Teps" +digraph G { +rankdir=TB; +ranksep=0.3; + +node [shape=box margin=0 height=0.25]; +A; B; C; + +node [shape=ellipse]; +GMat0; +GMat1; +GMat2; +GMat3; + +GMat0 -> A -> GMat1 -> B -> GMat2; +GMat2 -> C; +GMat0 -> C -> GMat3 + +subgraph cluster {style=invis; A; GMat1; B; GMat2; C}; +} +#+END_SRC + +The initial graph: operations are not resolved yet. + +*** :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.18 + :END: + +#+BEGIN_SRC dot :file "011-hetero-homo.eps" :cmdline "-Kdot -Teps" +digraph G { +rankdir=TB; +ranksep=0.3; + +node [shape=box margin=0 height=0.25]; +A; B; C; + +node [shape=ellipse]; +GMat0; +GMat1; +GMat2; +GMat3; + +GMat0 -> A -> GMat1 -> B -> GMat2; +GMat2 -> C; +GMat0 -> C -> GMat3 + +subgraph cluster {style=filled;color=azure2; A; GMat1; B; GMat2; C}; +} +#+END_SRC + +All operations are handled by the same backend. + +*** :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.18 + :END: + +#+BEGIN_SRC dot :file "012-hetero-a.eps" :cmdline "-Kdot -Teps" +digraph G { +rankdir=TB; +ranksep=0.3; + +node [shape=box margin=0 height=0.25]; +A; B; C; + +node [shape=ellipse]; +GMat0; +GMat1; +GMat2; +GMat3; + +GMat0 -> A -> GMat1 -> B -> GMat2; +GMat2 -> C; +GMat0 -> C -> GMat3 + +subgraph cluster_1 {style=filled;color=azure2; A; GMat1; B; } +subgraph cluster_2 {style=filled;color=ivory2; C}; +} +#+END_SRC + +~A~ & ~B~ are of backend ~1~, ~C~ is of backend ~2~. 
+ +*** :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.18 + :END: + +#+BEGIN_SRC dot :file "013-hetero-b.eps" :cmdline "-Kdot -Teps" +digraph G { +rankdir=TB; +ranksep=0.3; + +node [shape=box margin=0 height=0.25]; +A; B; C; + +node [shape=ellipse]; +GMat0; +GMat1; +GMat2; +GMat3; + +GMat0 -> A -> GMat1 -> B -> GMat2; +GMat2 -> C; +GMat0 -> C -> GMat3 + +subgraph cluster_1 {style=filled;color=azure2; A}; +subgraph cluster_2 {style=filled;color=ivory2; B}; +subgraph cluster_3 {style=filled;color=azure2; C}; +} +#+END_SRC + +~A~ & ~C~ are of backend ~1~, ~B~ is of backend ~2~. + +** Heterogeneity in G-API + +*** Heterogeneity summary + +- G-API automatically partitions its graph in subgraphs (called "islands") + based on the available kernels; +- Adjacent kernels taken from the same backend are "fused" into the same + "island"; +- G-API implements a two-level execution model: + - Islands are executed at the top level by a G-API's *Executor*; + - Island internals are run at the bottom level by its *Backend*; +- G-API fully delegates the low-level execution and memory management to backends. + +* Inference and Streaming + +** Inference with G-API + +*** In-graph inference example + +- Starting with OpencV 4.2 (2019), G-API allows to integrate ~infer~ + operations into the graph: + #+LaTeX: {\scriptsize + #+BEGIN_SRC C++ + G_API_NET(ObjDetect, , "pdf.example.od"); + + cv::GMat in; + cv::GMat blob = cv::gapi::infer(bgr); + cv::GOpaque size = cv::gapi::streaming::size(bgr); + cv::GArray objs = cv::gapi::streaming::parseSSD(blob, size); + cv::GComputation pipelne(cv::GIn(in), cv::GOut(objs)); + #+END_SRC + #+LaTeX: } +- Starting with OpenCV 4.5 (2020), G-API will provide more streaming- + and NN-oriented operations out of the box. + +** Inference with G-API + +*** What is the difference? 
+ +- ~ObjDetect~ is not an operation, ~cv::gapi::infer~ is; +- ~cv::gapi::infer~ is a *generic* operation, where ~T=ObjDetect~ describes + the calling convention: + - How many inputs the network consumes, + - How many outputs the network produces. +- Inference data types are ~GMat~ only: + - Representing an image, then preprocessed automatically; + - Representing a blob (n-dimensional ~Mat~), then passed as-is. +- Inference *backends* only need to implement a single generic operation ~infer~. + +** Inference with G-API + +*** But how does it run? + +- Since ~infer~ is an *Operation*, backends may provide *Kernels* implenting it; +- The only publicly available inference backend now is *OpenVINO™*: + - Brings its ~infer~ kernel atop of the Inference Engine; +- NN model data is passed through G-API compile arguments (like kernels); +- Every NN backend provides its own structure to configure the network (like + a kernel API). + +** Inference with G-API + +*** Passing OpenVINO™ parameters to G-API + +- ~ObjDetect~ example: + #+LaTeX: {\footnotesize + #+BEGIN_SRC C++ + auto face_net = cv::gapi::ie::Params { + face_xml_path, // path to the topology IR + face_bin_path, // path to the topology weights + face_device_string, // OpenVINO plugin (device) string + }; + auto networks = cv::gapi::networks(face_net); + pipeline.compile(.., cv::compile_args(..., networks)); + #+END_SRC + #+LaTeX: } +- ~AgeGender~ requires binding Op's outputs to NN layers: + #+LaTeX: {\footnotesize + #+BEGIN_SRC C++ + auto age_net = cv::gapi::ie::Params { + ... + }.cfgOutputLayers({"age_conv3", "prob"}); // array ! 
+ #+END_SRC + #+LaTeX: } + +** Streaming with G-API + +#+BEGIN_SRC dot :file 020-fd-demo.eps :cmdline "-Kdot -Teps" +digraph { + rankdir=LR; + node [shape=box]; + + cap [label=Capture]; + dec [label=Decode]; + res [label=Resize]; + cnn [label=Infer]; + vis [label=Visualize]; + + cap -> dec; + dec -> res; + res -> cnn; + cnn -> vis; +} +#+END_SRC +Anatomy of a regular video analytics application + +** Streaming with G-API + +#+BEGIN_SRC dot :file 021-fd-serial.eps :cmdline "-Kdot -Teps" +digraph { + node [shape=box margin=0 width=0.3 height=0.4] + nodesep=0.2; + rankdir=LR; + + subgraph cluster0 { + colorscheme=blues9 + pp [label="..." shape=plaintext]; + v0 [label=V]; + label="Frame N-1"; + color=7; + } + + subgraph cluster1 { + colorscheme=blues9 + c1 [label=C]; + d1 [label=D]; + r1 [label=R]; + i1 [label=I]; + v1 [label=V]; + label="Frame N"; + color=6; + } + + subgraph cluster2 { + colorscheme=blues9 + c2 [label=C]; + nn [label="..." shape=plaintext]; + label="Frame N+1"; + color=5; + } + + c1 -> d1 -> r1 -> i1 -> v1; + + pp-> v0; + v0 -> c1 [style=invis]; + v1 -> c2 [style=invis]; + c2 -> nn; +} +#+END_SRC +Serial execution of the sample video analytics application + +** Streaming with G-API + :PROPERTIES: + :BEAMER_opt: shrink + :END: + +#+BEGIN_SRC dot :file 022-fd-pipelined.eps :cmdline "-Kdot -Teps" +digraph { + nodesep=0.2; + ranksep=0.2; + node [margin=0 width=0.4 height=0.2]; + node [shape=plaintext] + Camera [label="Camera:"]; + GPU [label="GPU:"]; + FPGA [label="FPGA:"]; + CPU [label="CPU:"]; + Time [label="Time:"]; + t6 [label="T6"]; + t7 [label="T7"]; + t8 [label="T8"]; + t9 [label="T9"]; + t10 [label="T10"]; + tnn [label="..."]; + + node [shape=box margin=0 width=0.4 height=0.4 colorscheme=blues9] + node [color=9] V3; + node [color=8] F4; V4; + node [color=7] DR5; F5; V5; + node [color=6] C6; DR6; F6; V6; + node [color=5] C7; DR7; F7; V7; + node [color=4] C8; DR8; F8; + node [color=3] C9; DR9; + node [color=2] C10; + + {rank=same; rankdir=LR; Camera 
C6 C7 C8 C9 C10} + Camera -> C6 -> C7 -> C8 -> C9 -> C10 [style=invis]; + + {rank=same; rankdir=LR; GPU DR5 DR6 DR7 DR8 DR9} + GPU -> DR5 -> DR6 -> DR7 -> DR8 -> DR9 [style=invis]; + + C6 -> DR5 [style=invis]; + C6 -> DR6 [constraint=false]; + C7 -> DR7 [constraint=false]; + C8 -> DR8 [constraint=false]; + C9 -> DR9 [constraint=false]; + + {rank=same; rankdir=LR; FPGA F4 F5 F6 F7 F8} + FPGA -> F4 -> F5 -> F6 -> F7 -> F8 [style=invis]; + + DR5 -> F4 [style=invis]; + DR5 -> F5 [constraint=false]; + DR6 -> F6 [constraint=false]; + DR7 -> F7 [constraint=false]; + DR8 -> F8 [constraint=false]; + + {rank=same; rankdir=LR; CPU V3 V4 V5 V6 V7} + CPU -> V3 -> V4 -> V5 -> V6 -> V7 [style=invis]; + + F4 -> V3 [style=invis]; + F4 -> V4 [constraint=false]; + F5 -> V5 [constraint=false]; + F6 -> V6 [constraint=false]; + F7 -> V7 [constraint=false]; + + {rank=same; rankdir=LR; Time t6 t7 t8 t9 t10 tnn} + Time -> t6 -> t7 -> t8 -> t9 -> t10 -> tnn [style=invis]; + + CPU -> Time [style=invis]; + V3 -> t6 [style=invis]; + V4 -> t7 [style=invis]; + V5 -> t8 [style=invis]; + V6 -> t9 [style=invis]; + V7 -> t10 [style=invis]; +} +#+END_SRC +Pipelined execution for the video analytics application + +** Streaming with G-API: Example + +**** Serial mode (4.0) :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.45 + :END: +#+LaTeX: {\tiny +#+BEGIN_SRC C++ +pipeline = cv::GComputation(...); + +cv::VideoCapture cap(input); +cv::Mat in_frame; +std::vector out_faces; + +while (cap.read(in_frame)) { + pipeline.apply(cv::gin(in_frame), + cv::gout(out_faces), + cv::compile_args(kernels, + networks)); + // Process results + ... 
+} +#+END_SRC +#+LaTeX: } + +**** Streaming mode (since 4.2) :B_block:BMCOL: + :PROPERTIES: + :BEAMER_env: block + :BEAMER_col: 0.45 + :END: +#+LaTeX: {\tiny +#+BEGIN_SRC C++ +pipeline = cv::GComputation(...); + +auto in_src = cv::gapi::wip::make_src + (input) +auto cc = pipeline.compileStreaming + (cv::compile_args(kernels, networks)) +cc.setSource(cv::gin(in_src)); +cc.start(); + +std::vector out_faces; +while (cc.pull(cv::gout(out_faces))) { + // Process results + ... +} +#+END_SRC +#+LaTeX: } + +**** More information + +#+LaTeX: {\footnotesize +https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/ +#+LaTeX: } + +* Latest features +** Latest features +*** Python API + +- Initial Python3 binding is available now in ~master~ (future 4.5); +- Only basic CV functionality is supported (~core~ & ~imgproc~ namespaces, + selecting backends); +- Adding more programmability, inference, and streaming is next. + +** Latest features +*** Python API + +#+LaTeX: {\footnotesize +#+BEGIN_SRC Python +import numpy as np +import cv2 as cv + +sz = (1280, 720) +in1 = np.random.randint(0, 100, sz).astype(np.uint8) +in2 = np.random.randint(0, 100, sz).astype(np.uint8) + +g_in1 = cv.GMat() +g_in2 = cv.GMat() +g_out = cv.gapi.add(g_in1, g_in2) +gr = cv.GComputation(g_in1, g_in2, g_out) + +pkg = cv.gapi.core.fluid.kernels() +out = gr.apply(in1, in2, args=cv.compile_args(pkg)) +#+END_SRC +#+LaTeX: } + * Understanding the "G-Effect" ** Understanding the "G-Effect" @@ -384,15 +940,22 @@ speed-up on QVGA taken as 1.0). * Resources on G-API ** Resources on G-API - + :PROPERTIES: + :BEAMER_opt: shrink + :END: *** Repository - https://github.com/opencv/opencv (see ~modules/gapi~) -- Integral part of OpenCV starting version 4.0; + +*** Article + +- https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/ *** Documentation -- https://docs.opencv.org/master/d0/d1e/gapi.html -- A tutorial and a class reference are there as well. 
+- https://docs.opencv.org/4.4.0/d0/d1e/gapi.html + +*** Tutorials +- https://docs.opencv.org/4.4.0/df/d7e/tutorial_table_of_content_gapi.html * Thank you! diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi.hpp index 2c99c8650d2..e4b20214796 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi.hpp @@ -24,10 +24,18 @@ #include #include +#include +#include +#include #include #include #include #include #include +// Include these files here to avoid cyclic dependency between +// Desync & GKernel & GComputation & GStreamingCompiled. +#include +#include + #endif // OPENCV_GAPI_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/core.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/core.hpp index 8ecba2b9d6b..6fca437d5a7 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/core.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/core.hpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #ifndef OPENCV_GAPI_CORE_HPP @@ -26,6 +26,7 @@ @defgroup gapi_transform Graph API: Image and channel composition functions @} */ + namespace cv { namespace gapi { namespace core { using GMat2 = std::tuple; @@ -308,6 +309,13 @@ namespace core { } }; + G_TYPED_KERNEL(GCountNonZero, (GMat)>, "org.opencv.core.matrixop.countNonZero") { + static GOpaqueDesc outMeta(GMatDesc in) { + GAPI_Assert(in.chan == 1); + return empty_gopaque_desc(); + } + }; + G_TYPED_KERNEL(GAddW, , "org.opencv.core.matrixop.addweighted") { static GMatDesc outMeta(GMatDesc a, double, GMatDesc b, double, double, int ddepth) { if (ddepth == -1) @@ -502,18 +510,93 @@ namespace core { } }; - G_TYPED_KERNEL(GSize, (GMat)>, "org.opencv.core.size") { - static GOpaqueDesc outMeta(const GMatDesc&) { - return empty_gopaque_desc(); + G_TYPED_KERNEL( + GKMeansND, + ,GMat,GMat>(GMat,int,GMat,TermCriteria,int,KmeansFlags)>, + "org.opencv.core.kmeansND") { + + static std::tuple + outMeta(const GMatDesc& in, int K, const GMatDesc& bestLabels, const TermCriteria&, int, + KmeansFlags flags) { + GAPI_Assert(in.depth == CV_32F); + std::vector amount_n_dim = detail::checkVector(in); + int amount = amount_n_dim[0], dim = amount_n_dim[1]; + if (amount == -1) // Mat with height != 1, width != 1, channels != 1 given + { // which means that kmeans will consider the following: + amount = in.size.height; + dim = in.size.width * in.chan; + } + // kmeans sets these labels' sizes when no bestLabels given: + GMatDesc out_labels(CV_32S, 1, Size{1, amount}); + // kmeans always sets these centers' sizes: + GMatDesc centers (CV_32F, 1, Size{dim, K}); + if (flags & KMEANS_USE_INITIAL_LABELS) + { + GAPI_Assert(bestLabels.depth == CV_32S); + int labels_amount = detail::checkVector(bestLabels, 1u); + GAPI_Assert(labels_amount == amount); + out_labels = bestLabels; // kmeans preserves bestLabels' sizes if given + } + return 
std::make_tuple(empty_gopaque_desc(), out_labels, centers); } }; - G_TYPED_KERNEL(GSizeR, (GOpaque)>, "org.opencv.core.sizeR") { - static GOpaqueDesc outMeta(const GOpaqueDesc&) { - return empty_gopaque_desc(); + G_TYPED_KERNEL( + GKMeansNDNoInit, + ,GMat,GMat>(GMat,int,TermCriteria,int,KmeansFlags)>, + "org.opencv.core.kmeansNDNoInit") { + + static std::tuple + outMeta(const GMatDesc& in, int K, const TermCriteria&, int, KmeansFlags flags) { + GAPI_Assert( !(flags & KMEANS_USE_INITIAL_LABELS) ); + GAPI_Assert(in.depth == CV_32F); + std::vector amount_n_dim = detail::checkVector(in); + int amount = amount_n_dim[0], dim = amount_n_dim[1]; + if (amount == -1) // Mat with height != 1, width != 1, channels != 1 given + { // which means that kmeans will consider the following: + amount = in.size.height; + dim = in.size.width * in.chan; + } + GMatDesc out_labels(CV_32S, 1, Size{1, amount}); + GMatDesc centers (CV_32F, 1, Size{dim, K}); + return std::make_tuple(empty_gopaque_desc(), out_labels, centers); } }; -} + + G_TYPED_KERNEL(GKMeans2D, ,GArray,GArray> + (GArray,int,GArray,TermCriteria,int,KmeansFlags)>, + "org.opencv.core.kmeans2D") { + static std::tuple + outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) { + return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc()); + } + }; + + G_TYPED_KERNEL(GKMeans3D, ,GArray,GArray> + (GArray,int,GArray,TermCriteria,int,KmeansFlags)>, + "org.opencv.core.kmeans3D") { + static std::tuple + outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) { + return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc()); + } + }; +} // namespace core + +namespace streaming { + +// Operations for Streaming (declared in this header for convenience) +G_TYPED_KERNEL(GSize, (GMat)>, "org.opencv.streaming.size") { + static GOpaqueDesc outMeta(const GMatDesc&) { + return empty_gopaque_desc(); + } +}; + +G_TYPED_KERNEL(GSizeR, 
(GOpaque)>, "org.opencv.streaming.sizeR") { + static GOpaqueDesc outMeta(const GOpaqueDesc&) { + return empty_gopaque_desc(); + } +}; +} // namespace streaming //! @addtogroup gapi_math //! @{ @@ -755,6 +838,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref @note Function textual ID is "org.opencv.core.math.mean" @param src input matrix. +@sa countNonZero, min, max */ GAPI_EXPORTS_W GScalar mean(const GMat& src); @@ -856,7 +940,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1 @note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGT" @param src1 first input matrix. @param src2 second input matrix/scalar of the same depth as first input matrix. -@sa min, max, threshold, cmpLE, cmpGE, cmpLS +@sa min, max, threshold, cmpLE, cmpGE, cmpLT */ GAPI_EXPORTS GMat cmpGT(const GMat& src1, const GMat& src2); /** @overload @@ -908,7 +992,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGE" @param src1 first input matrix. @param src2 second input matrix/scalar of the same depth as first input matrix. -@sa min, max, threshold, cmpLE, cmpGT, cmpLS +@sa min, max, threshold, cmpLE, cmpGT, cmpLT */ GAPI_EXPORTS GMat cmpGE(const GMat& src1, const GMat& src2); /** @overload @@ -934,7 +1018,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLE" @param src1 first input matrix. @param src2 second input matrix/scalar of the same depth as first input matrix. 
-@sa min, max, threshold, cmpGT, cmpGE, cmpLS +@sa min, max, threshold, cmpGT, cmpGE, cmpLT */ GAPI_EXPORTS GMat cmpLE(const GMat& src1, const GMat& src2); /** @overload @@ -1012,7 +1096,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref */ GAPI_EXPORTS GMat bitwise_and(const GMat& src1, const GMat& src2); /** @overload -@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_andS" +@note Function textual ID is "org.opencv.core.pixelwise.bitwise_andS" @param src1 first input matrix. @param src2 scalar, which will be per-lemenetly conjuncted with elements of src1. */ @@ -1036,7 +1120,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref */ GAPI_EXPORTS GMat bitwise_or(const GMat& src1, const GMat& src2); /** @overload -@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_orS" +@note Function textual ID is "org.opencv.core.pixelwise.bitwise_orS" @param src1 first input matrix. @param src2 scalar, which will be per-lemenetly disjuncted with elements of src1. */ @@ -1061,7 +1145,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref */ GAPI_EXPORTS GMat bitwise_xor(const GMat& src1, const GMat& src2); /** @overload -@note Function textual ID is "org.opencv.core.pixelwise.compare.bitwise_xorS" +@note Function textual ID is "org.opencv.core.pixelwise.bitwise_xorS" @param src1 first input matrix. @param src2 scalar, for which per-lemenet "logical or" operation on elements of src1 will be performed. */ @@ -1121,7 +1205,7 @@ Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @note Function textual ID is "org.opencv.core.matrixop.min" @param src1 first input matrix. @param src2 second input matrix of the same size and depth as src1. 
-@sa max, compareEqual, compareLess, compareLessEqual +@sa max, cmpEQ, cmpLT, cmpLE */ GAPI_EXPORTS GMat min(const GMat& src1, const GMat& src2); @@ -1138,7 +1222,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref @note Function textual ID is "org.opencv.core.matrixop.max" @param src1 first input matrix. @param src2 second input matrix of the same size and depth as src1. -@sa min, compare, compareEqual, compareGreater, compareGreaterEqual +@sa min, compare, cmpEQ, cmpGT, cmpGE */ GAPI_EXPORTS GMat max(const GMat& src1, const GMat& src2); @@ -1184,10 +1268,23 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref @note Function textual ID is "org.opencv.core.matrixop.sum" @param src input matrix. -@sa min, max +@sa countNonZero, mean, min, max */ GAPI_EXPORTS GScalar sum(const GMat& src); +/** @brief Counts non-zero array elements. + +The function returns the number of non-zero elements in src : +\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f] + +Supported matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1. + +@note Function textual ID is "org.opencv.core.matrixop.countNonZero" +@param src input single-channel matrix. +@sa mean, min, max +*/ +GAPI_EXPORTS GOpaque countNonZero(const GMat& src); + /** @brief Calculates the weighted sum of two matrices. The function addWeighted calculates the weighted sum of two matrices as follows: @@ -1324,14 +1421,14 @@ Output matrix must be of the same size and depth as src. types. @param type thresholding type (see the cv::ThresholdTypes). 
-@sa min, max, cmpGT, cmpLE, cmpGE, cmpLS +@sa min, max, cmpGT, cmpLE, cmpGE, cmpLT */ GAPI_EXPORTS GMat threshold(const GMat& src, const GScalar& thresh, const GScalar& maxval, int type); /** @overload This function applicable for all threshold types except CV_THRESH_OTSU and CV_THRESH_TRIANGLE @note Function textual ID is "org.opencv.core.matrixop.thresholdOT" */ -GAPI_EXPORTS std::tuple threshold(const GMat& src, const GScalar& maxval, int type); +GAPI_EXPORTS_W std::tuple threshold(const GMat& src, const GScalar& maxval, int type); /** @brief Applies a range-level threshold to each matrix element. @@ -1411,41 +1508,77 @@ Output image size will have the size dsize, the depth of output is the same as o */ GAPI_EXPORTS GMatP resizeP(const GMatP& src, const Size& dsize, int interpolation = cv::INTER_LINEAR); -/** @brief Creates one 3-channel (4-channel) matrix out of 3(4) single-channel ones. +/** @brief Creates one 4-channel matrix out of 4 single-channel ones. The function merges several matrices to make a single multi-channel matrix. That is, each element of the output matrix will be a concatenation of the elements of the input matrices, where elements of i-th input matrix are treated as mv[i].channels()-element vectors. -Input matrix must be of @ref CV_8UC3 (@ref CV_8UC4) type. +Output matrix must be of @ref CV_8UC4 type. -The function split3/split4 does the reverse operation. +The function split4 does the reverse operation. -@note Function textual ID for merge3 is "org.opencv.core.transform.merge3" -@note Function textual ID for merge4 is "org.opencv.core.transform.merge4" +@note + - Function textual ID is "org.opencv.core.transform.merge4" -@param src1 first input matrix to be merged -@param src2 second input matrix to be merged -@param src3 third input matrix to be merged -@param src4 fourth input matrix to be merged -@sa split4, split3 +@param src1 first input @ref CV_8UC1 matrix to be merged. +@param src2 second input @ref CV_8UC1 matrix to be merged. 
+@param src3 third input @ref CV_8UC1 matrix to be merged. +@param src4 fourth input @ref CV_8UC1 matrix to be merged. +@sa merge3, split4, split3 */ GAPI_EXPORTS GMat merge4(const GMat& src1, const GMat& src2, const GMat& src3, const GMat& src4); + +/** @brief Creates one 3-channel matrix out of 3 single-channel ones. + +The function merges several matrices to make a single multi-channel matrix. That is, each +element of the output matrix will be a concatenation of the elements of the input matrices, where +elements of i-th input matrix are treated as mv[i].channels()-element vectors. +Output matrix must be of @ref CV_8UC3 type. + +The function split3 does the reverse operation. + +@note + - Function textual ID is "org.opencv.core.transform.merge3" + +@param src1 first input @ref CV_8UC1 matrix to be merged. +@param src2 second input @ref CV_8UC1 matrix to be merged. +@param src3 third input @ref CV_8UC1 matrix to be merged. +@sa merge4, split4, split3 +*/ GAPI_EXPORTS GMat merge3(const GMat& src1, const GMat& src2, const GMat& src3); -/** @brief Divides a 3-channel (4-channel) matrix into 3(4) single-channel matrices. +/** @brief Divides a 4-channel matrix into 4 single-channel matrices. -The function splits a 3-channel (4-channel) matrix into 3(4) single-channel matrices: +The function splits a 4-channel matrix into 4 single-channel matrices: \f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f] -All output matrices must be in @ref CV_8UC1. +All output matrices must be of @ref CV_8UC1 type. -@note Function textual for split3 ID is "org.opencv.core.transform.split3" -@note Function textual for split4 ID is "org.opencv.core.transform.split4" +The function merge4 does the reverse operation. -@param src input @ref CV_8UC4 (@ref CV_8UC3) matrix. -@sa merge3, merge4 +@note + - Function textual ID is "org.opencv.core.transform.split4" + +@param src input @ref CV_8UC4 matrix. 
+@sa split3, merge3, merge4 */ GAPI_EXPORTS std::tuple split4(const GMat& src); + +/** @brief Divides a 3-channel matrix into 3 single-channel matrices. + +The function splits a 3-channel matrix into 3 single-channel matrices: +\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f] + +All output matrices must be of @ref CV_8UC1 type. + +The function merge3 does the reverse operation. + +@note + - Function textual ID is "org.opencv.core.transform.split3" + +@param src input @ref CV_8UC3 matrix. +@sa split4, merge3, merge4 +*/ GAPI_EXPORTS_W std::tuple split3(const GMat& src); /** @brief Applies a generic geometrical transformation to an image. @@ -1463,21 +1596,21 @@ convert from floating to fixed-point representations of a map is that they can y cvFloor(y)) and \f$map_2\f$ contains indices in a table of interpolation coefficients. Output image must be of the same size and depth as input one. -@note Function textual ID is "org.opencv.core.transform.remap" +@note + - Function textual ID is "org.opencv.core.transform.remap" + - Due to current implementation limitations the size of an input and output images should be less than 32767x32767. @param src Source image. @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2, CV_32FC1, or CV_32FC2. @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if map1 is (x,y) points), respectively. -@param interpolation Interpolation method (see cv::InterpolationFlags). The method INTER_AREA is -not supported by this function. +@param interpolation Interpolation method (see cv::InterpolationFlags). The methods #INTER_AREA +and #INTER_LINEAR_EXACT are not supported by this function. @param borderMode Pixel extrapolation method (see cv::BorderTypes). When borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that corresponds to the "outliers" in the source image are not modified by the function. 
@param borderValue Value used in case of a constant border. By default, it is 0. -@note -Due to current implementation limitations the size of an input and output images should be less than 32767x32767. */ GAPI_EXPORTS GMat remap(const GMat& src, const Mat& map1, const Mat& map2, int interpolation, int borderMode = BORDER_CONSTANT, @@ -1732,9 +1865,83 @@ GAPI_EXPORTS GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, i int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar()); //! @} gapi_transform +/** @brief Finds centers of clusters and groups input samples around the clusters. + +The function kmeans implements a k-means algorithm that finds the centers of K clusters +and groups the input samples around the clusters. As an output, \f$\texttt{bestLabels}_i\f$ +contains a 0-based cluster index for the \f$i^{th}\f$ sample. + +@note + - Function textual ID is "org.opencv.core.kmeansND" + - In case of an N-dimentional points' set given, input GMat can have the following traits: +2 dimensions, a single row or column if there are N channels, +or N columns if there is a single channel. Mat should have @ref CV_32F depth. + - Although, if GMat with height != 1, width != 1, channels != 1 given as data, n-dimensional +samples are considered given in amount of A, where A = height, n = width * channels. + - In case of GMat given as data: + - the output labels are returned as 1-channel GMat with sizes +width = 1, height = A, where A is samples amount, or width = bestLabels.width, +height = bestLabels.height if bestLabels given; + - the cluster centers are returned as 1-channel GMat with sizes +width = n, height = K, where n is samples' dimentionality and K is clusters' amount. + - As one of possible usages, if you want to control the initial labels for each attempt +by yourself, you can utilize just the core of the function. 
To do that, set the number +of attempts to 1, initialize labels each time using a custom algorithm, pass them with the +( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best (most-compact) clustering. + +@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed. +Function can take GArray, GArray for 2D and 3D cases or GMat for any +dimentionality and channels. +@param K Number of clusters to split the set by. +@param bestLabels Optional input integer array that can store the supposed initial cluster indices +for every sample. Used when ( flags = #KMEANS_USE_INITIAL_LABELS ) flag is set. +@param criteria The algorithm termination criteria, that is, the maximum number of iterations +and/or the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of +the cluster centers moves by less than criteria.epsilon on some iteration, the algorithm stops. +@param attempts Flag to specify the number of times the algorithm is executed using different +initial labellings. The algorithm returns the labels that yield the best compactness (see the first +function return value). +@param flags Flag that can take values of cv::KmeansFlags . + +@return + - Compactness measure that is computed as +\f[\sum _i \| \texttt{samples} _i - \texttt{centers} _{ \texttt{labels} _i} \| ^2\f] +after every attempt. The best (minimum) value is chosen and the corresponding labels and the +compactness value are returned by the function. + - Integer array that stores the cluster indices for every sample. + - Array of the cluster centers. +*/ +GAPI_EXPORTS std::tuple,GMat,GMat> +kmeans(const GMat& data, const int K, const GMat& bestLabels, + const TermCriteria& criteria, const int attempts, const KmeansFlags flags); + +/** @overload +@note + - Function textual ID is "org.opencv.core.kmeansNDNoInit" + - #KMEANS_USE_INITIAL_LABELS flag must not be set while using this overload. 
+ */ +GAPI_EXPORTS std::tuple,GMat,GMat> +kmeans(const GMat& data, const int K, const TermCriteria& criteria, const int attempts, + const KmeansFlags flags); + +/** @overload +@note Function textual ID is "org.opencv.core.kmeans2D" + */ +GAPI_EXPORTS std::tuple,GArray,GArray> +kmeans(const GArray& data, const int K, const GArray& bestLabels, + const TermCriteria& criteria, const int attempts, const KmeansFlags flags); + +/** @overload +@note Function textual ID is "org.opencv.core.kmeans3D" + */ +GAPI_EXPORTS std::tuple,GArray,GArray> +kmeans(const GArray& data, const int K, const GArray& bestLabels, + const TermCriteria& criteria, const int attempts, const KmeansFlags flags); + +namespace streaming { /** @brief Gets dimensions from Mat. -@note Function textual ID is "org.opencv.core.size" +@note Function textual ID is "org.opencv.streaming.size" @param src Input tensor @return Size (tensor dimensions). @@ -1744,12 +1951,13 @@ GAPI_EXPORTS GOpaque size(const GMat& src); /** @overload Gets dimensions from rectangle. -@note Function textual ID is "org.opencv.core.sizeR" +@note Function textual ID is "org.opencv.streaming.sizeR" @param r Input rectangle. @return Size (rectangle dimensions). 
*/ GAPI_EXPORTS GOpaque size(const GOpaque& r); +} //namespace streaming } //namespace gapi } //namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp index 86ceace19fd..6ddcb7270c4 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp @@ -101,6 +101,7 @@ public: const cv::Scalar& inVal(int input); cv::Scalar& outValR(int output); // FIXME: Avoid cv::Scalar s = ctx.outValR() + cv::MediaFrame& outFrame(int output); template std::vector& outVecR(int output) // FIXME: the same issue { return outVecRef(output).wref(); @@ -164,7 +165,7 @@ template<> struct get_in }; template<> struct get_in { - static cv::Mat get(GCPUContext &ctx, int idx) { return get_in::get(ctx, idx); } + static cv::MediaFrame get(GCPUContext &ctx, int idx) { return ctx.inArg(idx); } }; template<> struct get_in { @@ -258,6 +259,13 @@ template<> struct get_out return ctx.outValR(idx); } }; +template<> struct get_out +{ + static cv::MediaFrame& get(GCPUContext &ctx, int idx) + { + return ctx.outFrame(idx); + } +}; template struct get_out> { static std::vector& get(GCPUContext &ctx, int idx) @@ -271,6 +279,11 @@ template<> struct get_out >: public get_out>/GArray> conversion should be done more gracefully in the system +template struct get_out> >: public get_out> > +{ +}; + template struct get_out> { static U& get(GCPUContext &ctx, int idx) @@ -443,7 +456,7 @@ struct OCVStCallHelper, std::tuple> : template class GCPUKernelImpl: public cv::detail::KernelTag { - using CallHelper = detail::OCVCallHelper; + using CallHelper = cv::detail::OCVCallHelper; public: using API = K; @@ -497,7 +510,7 @@ private: template gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c) { - using P = detail::OCVCallHelper; + using P = 
cv::detail::OCVCallHelper; return GOCVFunctor{ K::id() , &K::getOutMeta , std::bind(&P::callFunctor, std::placeholders::_1, std::ref(c)) @@ -507,7 +520,7 @@ gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c) template gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(const Callable& c) { - using P = detail::OCVCallHelper; + using P = cv::detail::OCVCallHelper; return GOCVFunctor{ K::id() , &K::getOutMeta , std::bind(&P::callFunctor, std::placeholders::_1, c) diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garg.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garg.hpp index cca080d8a3b..d1482da8e48 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garg.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garg.hpp @@ -9,11 +9,14 @@ #define OPENCV_GAPI_GARG_HPP #include +#include #include #include #include +#include +#include #include #include @@ -21,9 +24,11 @@ #include #include #include +#include #include #include #include +#include namespace cv { @@ -90,16 +95,73 @@ using GArgs = std::vector; // FIXME: Express as M::type // FIXME: Move to a separate file! 
-using GRunArg = util::variant< +using GRunArgBase = util::variant< #if !defined(GAPI_STANDALONE) cv::UMat, #endif // !defined(GAPI_STANDALONE) + cv::RMat, cv::gapi::wip::IStreamSource::Ptr, cv::Mat, cv::Scalar, cv::detail::VectorRef, - cv::detail::OpaqueRef + cv::detail::OpaqueRef, + cv::MediaFrame >; + +namespace detail { +template +struct in_variant; + +template +struct in_variant > + : std::integral_constant::value > { +}; +} // namespace detail + +struct GAPI_EXPORTS GRunArg: public GRunArgBase +{ + // Metadata information here + using Meta = std::unordered_map; + Meta meta; + + // Mimic the old GRunArg semantics here, old of the times when + // GRunArg was an alias to variant<> + GRunArg(); + GRunArg(const cv::GRunArg &arg); + GRunArg(cv::GRunArg &&arg); + + GRunArg& operator= (const GRunArg &arg); + GRunArg& operator= (GRunArg &&arg); + + template + GRunArg(const T &t, + const Meta &m = Meta{}, + typename std::enable_if< detail::in_variant::value, int>::type = 0) + : GRunArgBase(t) + , meta(m) + { + } + template + GRunArg(T &&t, + const Meta &m = Meta{}, + typename std::enable_if< detail::in_variant::value, int>::type = 0) + : GRunArgBase(std::move(t)) + , meta(m) + { + } + template auto operator= (const T &t) + -> typename std::enable_if< detail::in_variant::value, cv::GRunArg>::type& + { + GRunArgBase::operator=(t); + return *this; + } + template auto operator= (T&& t) + -> typename std::enable_if< detail::in_variant::value, cv::GRunArg>::type& + { + GRunArgBase::operator=(std::move(t)); + return *this; + } +}; using GRunArgs = std::vector; // TODO: Think about the addition operator @@ -124,11 +186,13 @@ namespace gapi namespace wip { /** - * @brief This aggregate type represents all types which G-API can handle (via variant). + * @brief This aggregate type represents all types which G-API can + * handle (via variant). * - * It only exists to overcome C++ language limitations (where a `using`-defined class can't be forward-declared). 
+ * It only exists to overcome C++ language limitations (where a + * `using`-defined class can't be forward-declared). */ -struct Data: public GRunArg +struct GAPI_EXPORTS Data: public GRunArg { using GRunArg::GRunArg; template @@ -144,7 +208,9 @@ using GRunArgP = util::variant< cv::UMat*, #endif // !defined(GAPI_STANDALONE) cv::Mat*, + cv::RMat*, cv::Scalar*, + cv::MediaFrame*, cv::detail::VectorRef, cv::detail::OpaqueRef >; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garray.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garray.hpp index 9118f4de984..5d4b3c59e0d 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garray.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/garray.hpp @@ -284,6 +284,14 @@ namespace detail return static_cast&>(*m_ref).rref(); } + // Check if was created for/from std::vector + template bool holds() const + { + if (!m_ref) return false; + using U = typename std::decay::type; + return dynamic_cast*>(m_ref.get()) != nullptr; + } + void mov(VectorRef &v) { m_ref->mov(*v.m_ref); @@ -341,15 +349,18 @@ public: explicit GArray(detail::GArrayU &&ref) // GArrayU-based constructor : m_ref(ref) { putDetails(); } // (used by GCall, not for users) - detail::GArrayU strip() const { return m_ref; } + /// @private + detail::GArrayU strip() const { + return m_ref; + } + /// @private + static void VCtor(detail::VectorRef& vref) { + vref.reset(); + } private: - static void VCTor(detail::VectorRef& vref) { - vref.reset(); - vref.storeKind(); - } void putDetails() { - m_ref.setConstructFcn(&VCTor); + m_ref.setConstructFcn(&VCtor); m_ref.specifyType(); // FIXME: to unify those 2 to avoid excessive dynamic_cast m_ref.storeKind(); // } @@ -357,6 +368,8 @@ private: detail::GArrayU m_ref; }; +using GArrayP2f = GArray; + /** @} */ } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcall.hpp 
b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcall.hpp index ed5ba5fde84..8d1b8d60100 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcall.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcall.hpp @@ -11,6 +11,7 @@ #include // GArg #include // GMat #include // GScalar +#include // GFrame #include // GArray #include // GOpaque @@ -41,6 +42,7 @@ public: GMat yield (int output = 0); GMatP yieldP (int output = 0); GScalar yieldScalar(int output = 0); + GFrame yieldFrame (int output = 0); template GArray yieldArray(int output = 0) { @@ -56,11 +58,16 @@ public: Priv& priv(); const Priv& priv() const; -protected: - std::shared_ptr m_priv; + // GKernel and params can be modified, it's needed for infer, + // because information about output shapes doesn't exist in compile time + GKernel& kernel(); + cv::util::any& params(); void setArgs(std::vector &&args); +protected: + std::shared_ptr m_priv; + // Public versions return a typed array or opaque, those are implementation details detail::GArrayU yieldArray(int output = 0); detail::GOpaqueU yieldOpaque(int output = 0); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcommon.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcommon.hpp index dc2c8b9bd71..a474140baa0 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcommon.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcommon.hpp @@ -19,6 +19,7 @@ #include #include #include +#include namespace cv { @@ -44,12 +45,16 @@ namespace detail CV_BOOL, // bool user G-API data CV_INT, // int user G-API data CV_DOUBLE, // double user G-API data + CV_FLOAT, // float user G-API data + CV_UINT64, // uint64_t user G-API data + CV_STRING, // std::string user G-API data CV_POINT, // cv::Point user G-API data + CV_POINT2F, // cv::Point2f user G-API data CV_SIZE, // cv::Size user G-API data 
CV_RECT, // cv::Rect user G-API data CV_SCALAR, // cv::Scalar user G-API data CV_MAT, // cv::Mat user G-API data - CV_PRIM, // cv::gapi::wip::draw::Prim user G-API data + CV_DRAW_PRIM, // cv::gapi::wip::draw::Prim user G-API data }; // Type traits helper which simplifies the extraction of kind from type @@ -57,19 +62,24 @@ namespace detail template struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_UNKNOWN; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_INT; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_DOUBLE; }; - template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_SIZE; }; + template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_FLOAT; }; + template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_UINT64; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_BOOL; }; + template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_STRING; }; + template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_SIZE; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_SCALAR; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_POINT; }; + template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_POINT2F; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_RECT; }; template<> struct GOpaqueTraits { static constexpr const OpaqueKind kind = OpaqueKind::CV_MAT; }; template<> struct GOpaqueTraits - { static constexpr const OpaqueKind kind = OpaqueKind::CV_PRIM; }; - // GArray is not supporting bool type for now 
due to difference in std::vector implementation - using GOpaqueTraitsArrayTypes = std::tuple; + { static constexpr const OpaqueKind kind = OpaqueKind::CV_DRAW_PRIM; }; + using GOpaqueTraitsArrayTypes = std::tuple; // GOpaque is not supporting cv::Mat and cv::Scalar since there are GScalar and GMat types - using GOpaqueTraitsOpaqueTypes = std::tuple; + using GOpaqueTraitsOpaqueTypes = std::tuple; } // namespace detail // This definition is here because it is reused by both public(?) and internal @@ -87,6 +97,15 @@ enum class GShape: int GFRAME, }; +namespace gapi { +namespace s11n { +namespace detail { +template struct wrap_serialize; +} // namespace detail +} // namespace s11n +} // namespace gapi + + struct GCompileArg; namespace detail { @@ -132,7 +151,7 @@ namespace detail { * passed in (a variadic template parameter pack) into a vector of * cv::GCompileArg objects. */ -struct GAPI_EXPORTS_W_SIMPLE GCompileArg +struct GCompileArg { public: // NB: Required for pythnon bindings @@ -144,6 +163,9 @@ public: template::value, int>::type = 0> explicit GCompileArg(T &&t) : tag(detail::CompileArgTag::type>::tag()) + , serializeF(cv::gapi::s11n::detail::has_S11N_spec::value ? + &cv::gapi::s11n::detail::wrap_serialize::serialize : + nullptr) , arg(t) { } @@ -158,7 +180,16 @@ public: return util::any_cast(arg); } + void serialize(cv::gapi::s11n::IOStream& os) const + { + if (serializeF) + { + serializeF(os, *this); + } + } + private: + std::function serializeF; util::any arg; }; @@ -191,6 +222,19 @@ inline cv::util::optional getCompileArg(const cv::GCompileArgs &args) } return cv::util::optional(); } + +namespace s11n { +namespace detail { +template struct wrap_serialize +{ + static void serialize(IOStream& os, const GCompileArg& arg) + { + using DT = typename std::decay::type; + S11N
::serialize(os, arg.get
()); + } +}; +} // namespace detail +} // namespace s11n } // namespace gapi /** diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcomputation.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcomputation.hpp index b24766a2326..8732ada0d6c 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcomputation.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gcomputation.hpp @@ -37,14 +37,12 @@ namespace detail } // Forward-declare the serialization objects -namespace gimpl { +namespace gapi { namespace s11n { -namespace I { - struct IStream; - struct OStream; -} // namespace I + struct IIStream; + struct IOStream; } // namespace s11n -} // namespace gimpl +} // namespace gapi /** * \addtogroup gapi_main_classes @@ -259,6 +257,9 @@ public: */ void apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args = {}); // Arg-to-arg overload + /// @private -- Exclude this function from OpenCV documentation + GAPI_WRAP GRunArgs apply(GRunArgs &&ins, GCompileArgs &&args = {}); + /// @private -- Exclude this function from OpenCV documentation void apply(const std::vector& ins, // Compatibility overload const std::vector& outs, @@ -286,7 +287,7 @@ public: * @param args compilation arguments for underlying compilation * process. */ - GAPI_WRAP void apply(cv::Mat in, CV_OUT cv::Scalar &out, GCompileArgs &&args = {}); // Unary overload (scalar) + void apply(cv::Mat in, cv::Scalar &out, GCompileArgs &&args = {}); // Unary overload (scalar) /** * @brief Execute a binary computation (with compilation on the fly) @@ -298,7 +299,7 @@ public: * @param args compilation arguments for underlying compilation * process. 
*/ - GAPI_WRAP void apply(cv::Mat in1, cv::Mat in2, CV_OUT cv::Mat &out, GCompileArgs &&args = {}); // Binary overload + void apply(cv::Mat in1, cv::Mat in2, cv::Mat &out, GCompileArgs &&args = {}); // Binary overload /** * @brief Execute an binary computation (with compilation on the fly) @@ -435,7 +436,7 @@ public: * * @sa @ref gapi_compile_args */ - GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {}); + GAPI_WRAP GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {}); /** * @brief Compile the computation for streaming mode. @@ -456,7 +457,7 @@ public: * * @sa @ref gapi_compile_args */ - GStreamingCompiled compileStreaming(GCompileArgs &&args = {}); + GAPI_WRAP GStreamingCompiled compileStreaming(GCompileArgs &&args = {}); // 2. Direct metadata version /** @@ -506,9 +507,9 @@ public: /// @private const Priv& priv() const; /// @private - explicit GComputation(cv::gimpl::s11n::I::IStream &); + explicit GComputation(cv::gapi::s11n::IIStream &); /// @private - void serialize(cv::gimpl::s11n::I::OStream &) const; + void serialize(cv::gapi::s11n::IOStream &) const; protected: @@ -528,6 +529,7 @@ protected: GCompileArgs comp_args = std::get(meta_and_compile_args); return compileStreaming(std::move(meta_args), std::move(comp_args)); } + void recompile(GMetaArgs&& in_metas, GCompileArgs &&args); /// @private std::shared_ptr m_priv; }; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gframe.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gframe.hpp index 7e19dbf7f79..13fd5d6d295 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gframe.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gframe.hpp @@ -42,16 +42,29 @@ private: }; /** @} */ +enum class MediaFormat: int +{ + BGR = 0, + NV12, +}; + /** * \addtogroup gapi_meta_args * @{ */ struct GAPI_EXPORTS GFrameDesc { + MediaFormat fmt; + cv::Size 
size; + + bool operator== (const GFrameDesc &) const; }; static inline GFrameDesc empty_gframe_desc() { return GFrameDesc{}; } /** @} */ +class MediaFrame; +GAPI_EXPORTS GFrameDesc descr_of(const MediaFrame &frame); + GAPI_EXPORTS std::ostream& operator<<(std::ostream& os, const cv::GFrameDesc &desc); } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gkernel.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gkernel.hpp index 7752b101691..2c44c67a963 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gkernel.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gkernel.hpp @@ -26,8 +26,16 @@ namespace cv { -using GShapes = std::vector; -using GKinds = std::vector; +struct GTypeInfo +{ + GShape shape; + cv::detail::OpaqueKind kind; +}; + +using GShapes = std::vector; +using GKinds = std::vector; +using GCtors = std::vector; +using GTypesInfo = std::vector; // GKernel describes kernel API to the system // FIXME: add attributes of a kernel, (e.g. number and types @@ -41,6 +49,7 @@ struct GAPI_EXPORTS GKernel M outMeta; // generic adaptor to API::outMeta(...) GShapes outShapes; // types (shapes) kernel's outputs GKinds inKinds; // kinds of kernel's inputs (fixme: below) + GCtors outCtors; // captured constructors for template output types }; // TODO: It's questionable if inKinds should really be here. Instead, // this information could come from meta. 
@@ -60,30 +69,31 @@ namespace detail // yield() is used in graph construction time as a generic method to obtain // lazy "return value" of G-API operations // - namespace + template struct Yield; + template<> struct Yield { - template struct Yield; - template<> struct Yield - { - static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); } - }; - template<> struct Yield - { - static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); } - }; - template<> struct Yield - { - static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); } - }; - template struct Yield > - { - static inline cv::GArray yield(cv::GCall &call, int i) { return call.yieldArray(i); } - }; - template struct Yield > - { - static inline cv::GOpaque yield(cv::GCall &call, int i) { return call.yieldOpaque(i); } - }; - } // anonymous namespace + static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); } + }; + template<> struct Yield + { + static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); } + }; + template<> struct Yield + { + static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); } + }; + template struct Yield > + { + static inline cv::GArray yield(cv::GCall &call, int i) { return call.yieldArray(i); } + }; + template struct Yield > + { + static inline cv::GOpaque yield(cv::GCall &call, int i) { return call.yieldOpaque(i); } + }; + template<> struct Yield + { + static inline cv::GFrame yield(cv::GCall &call, int i) { return call.yieldFrame(i); } + }; //////////////////////////////////////////////////////////////////////////// // Helper classes which brings outputMeta() marshalling to kernel @@ -95,11 +105,12 @@ namespace detail template struct MetaType; template<> struct MetaType { using type = GMatDesc; }; template<> struct MetaType { using type = GMatDesc; }; - template<> struct MetaType { using type = GMatDesc; }; + template<> struct MetaType { using type 
= GFrameDesc; }; template<> struct MetaType { using type = GScalarDesc; }; template struct MetaType > { using type = GArrayDesc; }; template struct MetaType > { using type = GOpaqueDesc; }; template struct MetaType { using type = T; }; // opaque args passed as-is + // FIXME: Move it to type traits? // 2. Hacky test based on MetaType to check if we operate on G-* type or not template using is_nongapi_type = std::is_same::type>; @@ -214,7 +225,8 @@ public: , K::tag() , &K::getOutMeta , {detail::GTypeTraits::shape...} - , {detail::GTypeTraits::op_kind...}}); + , {detail::GTypeTraits::op_kind...} + , {detail::GObtainCtor::get()...}}); call.pass(args...); // TODO: std::forward() here? return yield(call, typename detail::MkSeq::type()); } @@ -231,15 +243,14 @@ public: using InArgs = std::tuple; using OutArgs = std::tuple; - static_assert(!cv::detail::contains::value, "Values of GFrame type can't be used as operation outputs"); - static R on(Args... args) { cv::GCall call(GKernel{ K::id() , K::tag() , &K::getOutMeta , {detail::GTypeTraits::shape} - , {detail::GTypeTraits::op_kind...}}); + , {detail::GTypeTraits::op_kind...} + , {detail::GObtainCtor::get()}}); call.pass(args...); return detail::Yield::yield(call, 0); } @@ -458,11 +469,6 @@ namespace gapi { std::vector m_transformations; protected: - /// @private - // Check if package contains ANY implementation of a kernel API - // by API textual id. - bool includesAPI(const std::string &id) const; - /// @private // Remove ALL implementations of the given API (identified by ID) void removeAPI(const std::string &id); @@ -565,6 +571,9 @@ namespace gapi { return includesAPI(KAPI::id()); } + /// @private + bool includesAPI(const std::string &id) const; + // FIXME: The below comment is wrong, and who needs this function? 
/** * @brief Find a kernel (by its API) diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmat.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmat.hpp index b38ce48c971..20a10db92b5 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmat.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmat.hpp @@ -65,6 +65,8 @@ public: using GMat::GMat; }; +class RMat; + /** @} */ /** @@ -113,6 +115,8 @@ struct GAPI_EXPORTS GMatDesc // and as a 3-channel planar mat with height divided by 3) bool canDescribe(const cv::Mat& mat) const; + bool canDescribe(const cv::RMat& mat) const; + // Meta combinator: return a new GMatDesc which differs in size by delta // (all other fields are taken unchanged from this GMatDesc) // FIXME: a better name? @@ -199,6 +203,27 @@ struct GAPI_EXPORTS GMatDesc static inline GMatDesc empty_gmat_desc() { return GMatDesc{-1,-1,{-1,-1}}; } +namespace gapi { namespace detail { +/** Checks GMatDesc fields if the passed matrix is a set of n-dimentional points. +@param in GMatDesc to check. +@param n expected dimensionality. +@return the amount of points. In case input matrix can't be described as vector of points +of expected dimensionality, returns -1. + */ +int checkVector(const GMatDesc& in, const size_t n); + +/** @overload + +Checks GMatDesc fields if the passed matrix can be described as a set of points of any +dimensionality. + +@return array of two elements in form of std::vector: the amount of points +and their calculated dimensionality. In case input matrix can't be described as vector of points, +returns {-1, -1}. 
+ */ +std::vector checkVector(const GMatDesc& in); +}} // namespace gapi::detail + #if !defined(GAPI_STANDALONE) GAPI_EXPORTS GMatDesc descr_of(const cv::UMat &mat); #endif // !defined(GAPI_STANDALONE) @@ -209,6 +234,8 @@ namespace gapi { namespace own { GAPI_EXPORTS GMatDesc descr_of(const Mat &mat); }}//gapi::own +GAPI_EXPORTS GMatDesc descr_of(const RMat &mat); + #if !defined(GAPI_STANDALONE) GAPI_EXPORTS GMatDesc descr_of(const cv::Mat &mat); #else diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmetaarg.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmetaarg.hpp index 499de45aecf..f21182c19f4 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmetaarg.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gmetaarg.hpp @@ -18,6 +18,7 @@ #include #include #include +#include namespace cv { @@ -38,6 +39,7 @@ using GMetaArg = util::variant , GScalarDesc , GArrayDesc , GOpaqueDesc + , GFrameDesc >; GAPI_EXPORTS std::ostream& operator<<(std::ostream& os, const GMetaArg &); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gopaque.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gopaque.hpp index 1c45d4683eb..6117971768b 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gopaque.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gopaque.hpp @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -119,6 +120,7 @@ namespace detail virtual void mov(BasicOpaqueRef &ref) = 0; virtual const void* ptr() const = 0; + virtual void set(const cv::util::any &a) = 0; }; template class OpaqueRefT final: public BasicOpaqueRef @@ -212,6 +214,10 @@ namespace detail } virtual const void* ptr() const override { return &rref(); } + + virtual void set(const cv::util::any &a) override { + wref() = util::any_cast(a); + } }; // This class strips type 
information from OpaqueRefT<> and makes it usable @@ -240,7 +246,7 @@ namespace detail // FIXME: probably won't work with const object explicit OpaqueRef(T&& obj) : m_ref(new OpaqueRefT>(std::forward(obj))), - m_kind(GOpaqueTraits::kind) {} + m_kind(GOpaqueTraits>::kind) {} cv::detail::OpaqueKind getKind() const { @@ -285,6 +291,13 @@ namespace detail // May be used to uniquely identify this object internally const void *ptr() const { return m_ref->ptr(); } + + // Introduced for in-graph meta handling + OpaqueRef& operator= (const cv::util::any &a) + { + m_ref->set(a); + return *this; + } }; } // namespace detail @@ -295,25 +308,27 @@ namespace detail template class GOpaque { public: - GOpaque() { putDetails(); } // Empty constructor - explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor - : m_ref(ref) { putDetails(); } // (used by GCall, not for users) - - detail::GOpaqueU strip() const { return m_ref; } - -private: // Host type (or Flat type) - the type this GOpaque is actually // specified to. 
using HT = typename detail::flatten_g>::type; - static void CTor(detail::OpaqueRef& ref) { - ref.reset(); - ref.storeKind(); + GOpaque() { putDetails(); } // Empty constructor + explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor + : m_ref(ref) { putDetails(); } // (used by GCall, not for users) + + /// @private + detail::GOpaqueU strip() const { + return m_ref; } + /// @private + static void Ctor(detail::OpaqueRef& ref) { + ref.reset(); + } +private: void putDetails() { - m_ref.setConstructFcn(&CTor); - m_ref.specifyType(); // FIXME: to unify those 2 to avoid excessive dynamic_cast - m_ref.storeKind(); // + m_ref.setConstructFcn(&Ctor); + m_ref.specifyType(); + m_ref.storeKind(); } detail::GOpaqueU m_ref; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gproto.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gproto.hpp index fbcccb38ea7..f91fcdb2c8c 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gproto.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gproto.hpp @@ -135,7 +135,7 @@ GRunArg value_of(const GOrigin &origin); // Transform run-time computation arguments into a collection of metadata // extracted from that arguments GMetaArg GAPI_EXPORTS descr_of(const GRunArg &arg ); -GMetaArgs GAPI_EXPORTS descr_of(const GRunArgs &args); +GMetaArgs GAPI_EXPORTS_W descr_of(const GRunArgs &args); // Transform run-time operation result argument into metadata extracted from that argument // Used to compare the metadata, which generated at compile time with the metadata result operation in run time diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gstreaming.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gstreaming.hpp index 70790420699..e09cf8d0f78 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gstreaming.hpp +++ 
b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gstreaming.hpp @@ -8,15 +8,99 @@ #ifndef OPENCV_GAPI_GSTREAMING_COMPILED_HPP #define OPENCV_GAPI_GSTREAMING_COMPILED_HPP +#include #include #include #include +#include #include #include namespace cv { +template using optional = cv::util::optional; + +namespace detail { +template struct wref_spec { + using type = T; +}; +template struct wref_spec > { + using type = T; +}; + +template +struct OptRef { + struct OptHolder { + virtual void mov(RefHolder &h) = 0; + virtual void reset() = 0; + virtual ~OptHolder() = default; + using Ptr = std::shared_ptr; + }; + template struct Holder final: OptHolder { + std::reference_wrapper > m_opt_ref; + + explicit Holder(cv::optional& opt) : m_opt_ref(std::ref(opt)) { + } + virtual void mov(RefHolder &h) override { + using U = typename wref_spec::type; + m_opt_ref.get() = cv::util::make_optional(std::move(h.template wref())); + } + virtual void reset() override { + m_opt_ref.get().reset(); + } + }; + template + explicit OptRef(cv::optional& t) : m_opt{new Holder(t)} {} + void mov(RefHolder &h) { m_opt->mov(h); } + void reset() { m_opt->reset();} +private: + typename OptHolder::Ptr m_opt; +}; +using OptionalVectorRef = OptRef; +using OptionalOpaqueRef = OptRef; +} // namespace detail + +// TODO: Keep it in sync with GRunArgP (derive the type automatically?) +using GOptRunArgP = util::variant< + optional*, + optional*, + optional*, + cv::detail::OptionalVectorRef, + cv::detail::OptionalOpaqueRef +>; +using GOptRunArgsP = std::vector; + +namespace detail { + +template inline GOptRunArgP wrap_opt_arg(optional& arg) { + // By default, T goes to an OpaqueRef. 
All other types are specialized + return GOptRunArgP{OptionalOpaqueRef(arg)}; +} + +template inline GOptRunArgP wrap_opt_arg(optional >& arg) { + return GOptRunArgP{OptionalVectorRef(arg)}; +} + +template<> inline GOptRunArgP wrap_opt_arg(optional &m) { + return GOptRunArgP{&m}; +} + +template<> inline GOptRunArgP wrap_opt_arg(optional &s) { + return GOptRunArgP{&s}; +} + +} // namespace detail + +// Now cv::gout() may produce an empty vector (see "dynamic graphs"), so +// there may be a conflict between these two. State here that Opt version +// _must_ have at least one input for this overload +template +inline GOptRunArgsP gout(optional&arg, optional&... args) +{ + return GOptRunArgsP{ detail::wrap_opt_arg(arg), detail::wrap_opt_arg(args)... }; +} + /** * \addtogroup gapi_main_classes * @{ @@ -49,11 +133,11 @@ namespace cv { * * @sa GCompiled */ -class GAPI_EXPORTS GStreamingCompiled +class GAPI_EXPORTS_W_SIMPLE GStreamingCompiled { public: class GAPI_EXPORTS Priv; - GStreamingCompiled(); + GAPI_WRAP GStreamingCompiled(); // FIXME: More overloads? /** @@ -96,7 +180,7 @@ public: * @param ins vector of inputs to process. * @sa gin */ - void setSource(GRunArgs &&ins); + GAPI_WRAP void setSource(GRunArgs &&ins); /** * @brief Specify an input video stream for a single-input @@ -109,7 +193,23 @@ public: * @param s a shared pointer to IStreamSource representing the * input video stream. */ - void setSource(const gapi::wip::IStreamSource::Ptr& s); + GAPI_WRAP void setSource(const gapi::wip::IStreamSource::Ptr& s); + + /** + * @brief Constructs and specifies an input video stream for a + * single-input computation pipeline with the given parameters. + * + * Throws if pipeline is already running. Use stop() and then + * setSource() to run the graph on a new video stream. + * + * @overload + * @param args arguments used to contruct and initialize a stream + * source. + */ + template + void setSource(Args&&... 
args) { + setSource(cv::gapi::wip::make_src(std::forward(args)...)); + } /** * @brief Start the pipeline execution. @@ -126,7 +226,7 @@ public: * start()/stop()/setSource() may be called on the same object in * multiple threads in your application. */ - void start(); + GAPI_WRAP void start(); /** * @brief Get the next processed frame from the pipeline. @@ -150,6 +250,47 @@ public: */ bool pull(cv::GRunArgsP &&outs); + // NB: Used from python + GAPI_WRAP std::tuple pull(); + + /** + * @brief Get some next available data from the pipeline. + * + * This method takes a vector of cv::optional object. An object is + * assigned to some value if this value is available (ready) at + * the time of the call, and resets the object to empty() if it is + * not. + * + * This is a blocking method which guarantees that some data has + * been written to the output vector on return. + * + * Using this method only makes sense if the graph has + * desynchronized parts (see cv::gapi::desync). If there is no + * desynchronized parts in the graph, the behavior of this + * method is identical to the regular pull() (all data objects are + * produced synchronously in the output vector). + * + * Use gout() to create an output parameter vector. + * + * Output vectors must have the same number of elements as defined + * in the cv::GComputation protocol (at the moment of its + * construction). Shapes of elements also must conform to protocol + * (e.g. cv::optional needs to be passed where cv::GMat + * has been declared as output, and so on). Run-time exception is + * generated on type mismatch. + * + * This method writes new data into objects passed via output + * vector. If there is no data ready yet, this method blocks. Use + * try_pull() if you need a non-blocking version. + * + * @param outs vector of output parameters to obtain. + * @return true if next result has been obtained, + * false marks end of the stream. 
+ * + * @sa cv::gapi::desync + */ + bool pull(cv::GOptRunArgsP &&outs); + /** * @brief Try to get the next processed frame from the pipeline. * @@ -172,7 +313,7 @@ public: * * Throws if the pipeline is not running. */ - void stop(); + GAPI_WRAP void stop(); /** * @brief Test if the pipeline is running. @@ -184,7 +325,7 @@ public: * * @return true if the current stream is not over yet. */ - bool running() const; + GAPI_WRAP bool running() const; /// @private Priv& priv(); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gtype_traits.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gtype_traits.hpp index 3235b1a3738..2e8dcb1aec7 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gtype_traits.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/gtype_traits.hpp @@ -17,6 +17,7 @@ #include #include #include +#include #include namespace cv @@ -67,7 +68,7 @@ namespace detail template<> struct GTypeTraits { static constexpr const ArgKind kind = ArgKind::GFRAME; - static constexpr const GShape shape = GShape::GMAT; + static constexpr const GShape shape = GShape::GFRAME; static constexpr const OpaqueKind op_kind = OpaqueKind::CV_UNKNOWN; }; template<> struct GTypeTraits @@ -121,9 +122,11 @@ namespace detail template<> struct GTypeOf { using type = cv::GMat; }; #endif // !defined(GAPI_STANDALONE) template<> struct GTypeOf { using type = cv::GMat; }; + template<> struct GTypeOf { using type = cv::GMat; }; template<> struct GTypeOf { using type = cv::GScalar; }; template struct GTypeOf > { using type = cv::GArray; }; template struct GTypeOf { using type = cv::GOpaque;}; + template<> struct GTypeOf { using type = cv::GFrame; }; // FIXME: This is not quite correct since IStreamSource may produce not only Mat but also Scalar // and vector data. TODO: Extend the type dispatching on these types too. 
template<> struct GTypeOf { using type = cv::GMat;}; @@ -188,6 +191,29 @@ namespace detail template using wrap_gapi_helper = WrapValue::type>; template using wrap_host_helper = WrapValue >::type>; + +// Union type for various user-defined type constructors (GArray, +// GOpaque, etc) +// +// TODO: Replace construct-only API with a more generic one (probably +// with bits of introspection) +// +// Not required for non-user-defined types (GMat, GScalar, etc) +using HostCtor = util::variant + < util::monostate + , detail::ConstructVec + , detail::ConstructOpaque + >; + +template struct GObtainCtor { + static HostCtor get() { return HostCtor{}; } +}; +template struct GObtainCtor > { + static HostCtor get() { return HostCtor{ConstructVec{&GArray::VCtor}}; }; +}; +template struct GObtainCtor > { + static HostCtor get() { return HostCtor{ConstructOpaque{&GOpaque::Ctor}}; }; +}; } // namespace detail } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp index b4905e932b6..699f64837a9 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp @@ -21,14 +21,36 @@ @{ @defgroup gapi_filters Graph API: Image filters @defgroup gapi_colorconvert Graph API: Converting image from one color space to another + @defgroup gapi_feature Graph API: Image Feature Detection + @defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors @} */ +namespace { +void validateFindingContoursMeta(const int depth, const int chan, const int mode) +{ + GAPI_Assert(chan == 1); + switch (mode) + { + case cv::RETR_CCOMP: + GAPI_Assert(depth == CV_8U || depth == CV_32S); + break; + case cv::RETR_FLOODFILL: + GAPI_Assert(depth == CV_32S); + break; + default: + GAPI_Assert(depth == CV_8U); + break; + } +} +} // anonymous namespace + 
namespace cv { namespace gapi { namespace imgproc { using GMat2 = std::tuple; using GMat3 = std::tuple; // FIXME: how to avoid this? + using GFindContoursOutput = std::tuple>,GArray>; G_TYPED_KERNEL(GFilter2D, ,"org.opencv.imgproc.filters.filter2D") { static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) { @@ -78,6 +100,14 @@ namespace imgproc { } }; + G_TYPED_KERNEL(GMorphologyEx, , + "org.opencv.imgproc.filters.morphologyEx") { + static GMatDesc outMeta(const GMatDesc &in, MorphTypes, Mat, Point, int, + BorderTypes, Scalar) { + return in; + } + }; + G_TYPED_KERNEL(GSobel, , "org.opencv.imgproc.filters.sobel") { static GMatDesc outMeta(GMatDesc in, int ddepth, int, int, int, double, double, int, Scalar) { return in.withDepth(ddepth); @@ -110,7 +140,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GCanny, , "org.opencv.imgproc.canny"){ + G_TYPED_KERNEL(GCanny, , "org.opencv.imgproc.feature.canny"){ static GMatDesc outMeta(GMatDesc in, double, double, int, bool) { return in.withType(CV_8U, 1); } @@ -118,12 +148,164 @@ namespace imgproc { G_TYPED_KERNEL(GGoodFeatures, (GMat,int,double,double,Mat,int,bool,double)>, - "org.opencv.imgproc.goodFeaturesToTrack") { + "org.opencv.imgproc.feature.goodFeaturesToTrack") { static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) { return empty_array_desc(); } }; + using RetrMode = RetrievalModes; + using ContMethod = ContourApproximationModes; + G_TYPED_KERNEL(GFindContours, >(GMat,RetrMode,ContMethod,GOpaque)>, + "org.opencv.imgproc.shape.findContours") + { + static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return empty_array_desc(); + } + }; + + // FIXME oc: make default value offset = Point() + G_TYPED_KERNEL(GFindContoursNoOffset, >(GMat,RetrMode,ContMethod)>, + "org.opencv.imgproc.shape.findContoursNoOffset") + { + static GArrayDesc outMeta(GMatDesc in, RetrMode mode, 
ContMethod) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return empty_array_desc(); + } + }; + + G_TYPED_KERNEL(GFindContoursH,)>, + "org.opencv.imgproc.shape.findContoursH") + { + static std::tuple + outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return std::make_tuple(empty_array_desc(), empty_array_desc()); + } + }; + + // FIXME oc: make default value offset = Point() + G_TYPED_KERNEL(GFindContoursHNoOffset,, + "org.opencv.imgproc.shape.findContoursHNoOffset") + { + static std::tuple + outMeta(GMatDesc in, RetrMode mode, ContMethod) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return std::make_tuple(empty_array_desc(), empty_array_desc()); + } + }; + + G_TYPED_KERNEL(GBoundingRectMat, (GMat)>, + "org.opencv.imgproc.shape.boundingRectMat") { + static GOpaqueDesc outMeta(GMatDesc in) { + if (in.depth == CV_8U) + { + GAPI_Assert(in.chan == 1); + } + else + { + GAPI_Assert (in.depth == CV_32S || in.depth == CV_32F); + int amount = detail::checkVector(in, 2u); + GAPI_Assert(amount != -1 && + "Input Mat can't be described as vector of 2-dimentional points"); + } + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GBoundingRectVector32S, (GArray)>, + "org.opencv.imgproc.shape.boundingRectVector32S") { + static GOpaqueDesc outMeta(GArrayDesc) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GBoundingRectVector32F, (GArray)>, + "org.opencv.imgproc.shape.boundingRectVector32F") { + static GOpaqueDesc outMeta(GArrayDesc) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine2DMat, (GMat,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DMat") { + static GOpaqueDesc outMeta(GMatDesc in,DistanceTypes,double,double,double) { + int amount = detail::checkVector(in, 2u); + GAPI_Assert(amount != -1 && + "Input Mat can't be described as vector of 2-dimentional points"); + return empty_gopaque_desc(); + } + }; + + 
G_TYPED_KERNEL(GFitLine2DVector32S, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DVector32S") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine2DVector32F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DVector32F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine2DVector64F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DVector64F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DMat, (GMat,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DMat") { + static GOpaqueDesc outMeta(GMatDesc in,int,double,double,double) { + int amount = detail::checkVector(in, 3u); + GAPI_Assert(amount != -1 && + "Input Mat can't be described as vector of 3-dimentional points"); + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DVector32S, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DVector32S") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DVector32F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DVector32F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DVector64F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DVector64F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GBGR2RGB, , "org.opencv.imgproc.colorconvert.bgr2rgb") { + static 
GMatDesc outMeta(GMatDesc in) { + return in; // type still remains CV_8UC3; + } + }; + G_TYPED_KERNEL(GRGB2YUV, , "org.opencv.imgproc.colorconvert.rgb2yuv") { static GMatDesc outMeta(GMatDesc in) { return in; // type still remains CV_8UC3; @@ -136,6 +318,42 @@ namespace imgproc { } }; + G_TYPED_KERNEL(GBGR2I420, , "org.opencv.imgproc.colorconvert.bgr2i420") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 3); + GAPI_Assert(in.size.height % 2 == 0); + return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2)); + } + }; + + G_TYPED_KERNEL(GRGB2I420, , "org.opencv.imgproc.colorconvert.rgb2i420") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 3); + GAPI_Assert(in.size.height % 2 == 0); + return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2)); + } + }; + + G_TYPED_KERNEL(GI4202BGR, , "org.opencv.imgproc.colorconvert.i4202bgr") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 1); + GAPI_Assert(in.size.height % 3 == 0); + return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3)); + } + }; + + G_TYPED_KERNEL(GI4202RGB, , "org.opencv.imgproc.colorconvert.i4202rgb") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 1); + GAPI_Assert(in.size.height % 3 == 0); + return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3)); + } + }; + G_TYPED_KERNEL(GNV12toRGB, , "org.opencv.imgproc.colorconvert.nv12torgb") { static GMatDesc outMeta(GMatDesc in_y, GMatDesc in_uv) { GAPI_Assert(in_y.chan == 1); @@ -230,7 +448,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toRGBp, , "org.opencv.colorconvert.imgproc.nv12torgbp") { + G_TYPED_KERNEL(GNV12toRGBp, , "org.opencv.imgproc.colorconvert.nv12torgbp") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { 
GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -244,7 +462,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toGray, , "org.opencv.colorconvert.imgproc.nv12togray") { + G_TYPED_KERNEL(GNV12toGray, , "org.opencv.imgproc.colorconvert.nv12togray") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -259,7 +477,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toBGRp, , "org.opencv.colorconvert.imgproc.nv12tobgrp") { + G_TYPED_KERNEL(GNV12toBGRp, , "org.opencv.imgproc.colorconvert.nv12tobgrp") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -285,10 +503,10 @@ kernel kernelY. The final result is returned. Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1. Output image must have the same type, size, and number of channels as the input image. -@note In case of floating-point computation, rounding to nearest even is procedeed +@note + - In case of floating-point computation, rounding to nearest even is procedeed if hardware supports it (if not - to nearest value). - -@note Function textual ID is "org.opencv.imgproc.filters.sepfilter" + - Function textual ID is "org.opencv.imgproc.filters.sepfilter" @param src Source image. @param ddepth desired depth of the destination image (the following combinations of src.depth() and ddepth are supported: @@ -327,9 +545,9 @@ anchor.y - 1)`. Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1. Output image must have the same size and number of channels an input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.filter2D" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. 
+ - Function textual ID is "org.opencv.imgproc.filters.filter2D" @param src input image. @param ddepth desired depth of the destination image @@ -364,9 +582,9 @@ algorithms, and so on). If you need to compute pixel sums over variable-size win Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1. Output image must have the same type, size, and number of channels as the input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.boxfilter" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.boxfilter" @param src Source image. @param dtype the output image depth (-1 to set the input image data type). @@ -393,9 +611,9 @@ true, borderType)`. Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1. Output image must have the same type, size, and number of channels as the input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.blur" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.blur" @param src Source image. @param ksize blurring kernel size. @@ -421,9 +639,9 @@ Output image must have the same type and number of channels an input image. Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1. Output image must have the same type, size, and number of channels as the input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. 
- -@note Function textual ID is "org.opencv.imgproc.filters.gaussianBlur" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.gaussianBlur" @param src input image; @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be @@ -446,16 +664,16 @@ GAPI_EXPORTS GMat gaussianBlur(const GMat& src, const Size& ksize, double sigmaX The function smoothes an image using the median filter with the \f$\texttt{ksize} \times \texttt{ksize}\f$ aperture. Each channel of a multi-channel image is processed independently. Output image must have the same type, size, and number of channels as the input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. The median filter uses cv::BORDER_REPLICATE internally to cope with border pixels, see cv::BorderTypes - -@note Function textual ID is "org.opencv.imgproc.filters.medianBlur" + - Function textual ID is "org.opencv.imgproc.filters.medianBlur" @param src input matrix (image) @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ... @sa boxFilter, gaussianBlur */ -GAPI_EXPORTS GMat medianBlur(const GMat& src, int ksize); +GAPI_EXPORTS_W GMat medianBlur(const GMat& src, int ksize); /** @brief Erodes an image by using a specific structuring element. @@ -467,9 +685,9 @@ shape of a pixel neighborhood over which the minimum is taken: Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently. Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1. Output image must have the same type, size, and number of channels as the input image. 
-@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.erode" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.erode" @param src input image @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular @@ -479,7 +697,7 @@ anchor is at the element center. @param iterations number of times erosion is applied. @param borderType pixel extrapolation method, see cv::BorderTypes @param borderValue border value in case of a constant border -@sa dilate +@sa dilate, morphologyEx */ GAPI_EXPORTS GMat erode(const GMat& src, const Mat& kernel, const Point& anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, @@ -491,7 +709,9 @@ The function erodes the source image using the rectangular structuring element w Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently. Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1. Output image must have the same type, size, and number of channels as the input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.erode" @param src input image @param iterations number of times erosion is applied. @@ -512,9 +732,9 @@ shape of a pixel neighborhood over which the maximum is taken: Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently. Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1. 
Output image must have the same type, size, and number of channels as the input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.dilate" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.dilate" @param src input image. @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular @@ -539,9 +759,9 @@ shape of a pixel neighborhood over which the maximum is taken: Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently. Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1. Output image must have the same type, size, and number of channels as the input image. -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.dilate" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.dilate" @param src input image. @param iterations number of times dilation is applied. @@ -554,6 +774,38 @@ GAPI_EXPORTS GMat dilate3x3(const GMat& src, int iterations = 1, int borderType = BORDER_CONSTANT, const Scalar& borderValue = morphologyDefaultBorderValue()); +/** @brief Performs advanced morphological transformations. + +The function can perform advanced morphological transformations using an erosion and dilation as +basic operations. + +Any of the operations can be done in-place. In case of multi-channel images, each channel is +processed independently. + +@note + - Function textual ID is "org.opencv.imgproc.filters.morphologyEx" + - The number of iterations is the number of times erosion or dilatation operation will be +applied. 
For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to +apply successively: erode -> erode -> dilate -> dilate +(and not erode -> dilate -> erode -> dilate). + +@param src Input image. +@param op Type of a morphological operation, see #MorphTypes +@param kernel Structuring element. It can be created using #getStructuringElement. +@param anchor Anchor position within the element. Both negative values mean that the anchor is at +the kernel center. +@param iterations Number of times erosion and dilation are applied. +@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. +@param borderValue Border value in case of a constant border. The default value has a special +meaning. +@sa dilate, erode, getStructuringElement + */ +GAPI_EXPORTS GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel, + const Point &anchor = Point(-1,-1), + const int iterations = 1, + const BorderTypes borderType = BORDER_CONSTANT, + const Scalar &borderValue = morphologyDefaultBorderValue()); + /** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to @@ -583,9 +835,9 @@ The second case corresponds to a kernel of: \f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f] -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.sobel" +@note + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.sobel" @param src input image. 
@param ddepth output image depth, see @ref filter_depths "combinations"; in the case of @@ -634,11 +886,10 @@ The second case corresponds to a kernel of: \f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f] -@note First returned matrix correspons to dx derivative while the second one to dy. - -@note Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. - -@note Function textual ID is "org.opencv.imgproc.filters.sobelxy" +@note + - First returned matrix correspons to dx derivative while the second one to dy. + - Rounding to nearest even is procedeed if hardware supports it, if not - to nearest. + - Function textual ID is "org.opencv.imgproc.filters.sobelxy" @param src input image. @param ddepth output image depth, see @ref filter_depths "combinations"; in the case of @@ -719,6 +970,10 @@ proportional to sigmaSpace. GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT); +//! @} gapi_filters + +//! @addtogroup gapi_feature +//! @{ /** @brief Finds edges in an image using the Canny algorithm. The function finds edges in the input image and marks them in the output map edges using the @@ -726,7 +981,7 @@ Canny algorithm. The smallest value between threshold1 and threshold2 is used fo largest value is used to find initial segments of strong edges. See -@note Function textual ID is "org.opencv.imgproc.filters.canny" +@note Function textual ID is "org.opencv.imgproc.feature.canny" @param image 8-bit input image. @param threshold1 first threshold for the hysteresis procedure. @@ -757,11 +1012,11 @@ described in @cite Shi94 The function can be used to initialize a point-based tracker of an object. 
-@note If the function is called with different values A and B of the parameter qualityLevel , and +@note + - If the function is called with different values A and B of the parameter qualityLevel , and A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector with qualityLevel=B . - -@note Function textual ID is "org.opencv.imgproc.goodFeaturesToTrack" + - Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack" @param image Input 8-bit or floating-point 32-bit, single-channel image. @param maxCorners Maximum number of corners to return. If there are more corners than are found, @@ -784,7 +1039,7 @@ or #cornerMinEigenVal. @return vector of detected corners. */ -GAPI_EXPORTS GArray goodFeaturesToTrack(const GMat &image, +GAPI_EXPORTS_W GArray goodFeaturesToTrack(const GMat &image, int maxCorners, double qualityLevel, double minDistance, @@ -795,6 +1050,8 @@ GAPI_EXPORTS GArray goodFeaturesToTrack(const GMat &image, /** @brief Equalizes the histogram of a grayscale image. +//! @} gapi_feature + The function equalizes the histogram of the input image using the following algorithm: - Calculate the histogram \f$H\f$ for src . @@ -804,18 +1061,288 @@ The function equalizes the histogram of the input image using the following algo - Transform the image using \f$H'\f$ as a look-up table: \f$\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\f$ The algorithm normalizes the brightness and increases the contrast of the image. -@note The returned image is of the same size and type as input. - -@note Function textual ID is "org.opencv.imgproc.equalizeHist" +@note + - The returned image is of the same size and type as input. + - Function textual ID is "org.opencv.imgproc.equalizeHist" @param src Source 8-bit single channel image. */ GAPI_EXPORTS GMat equalizeHist(const GMat& src); -//! @} gapi_filters +//! @addtogroup gapi_shape +//! @{ +/** @brief Finds contours in a binary image. 
+ +The function retrieves contours from the binary image using the algorithm @cite Suzuki85 . +The contours are a useful tool for shape analysis and object detection and recognition. +See squares.cpp in the OpenCV sample directory. + +@note Function textual ID is "org.opencv.imgproc.shape.findContours" + +@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero +pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold , +#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one. +If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer +image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL then @ref CV_32SC1 is supported only. +@param mode Contour retrieval mode, see #RetrievalModes +@param method Contour approximation method, see #ContourApproximationModes +@param offset Optional offset by which every contour point is shifted. This is useful if the +contours are extracted from the image ROI and then they should be analyzed in the whole image +context. + +@return GArray of detected contours. Each contour is stored as a GArray of points. + */ +GAPI_EXPORTS GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset); + +// FIXME oc: make default value offset = Point() +/** @overload +@note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset" + */ +GAPI_EXPORTS GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method); + +/** @brief Finds contours and their hierarchy in a binary image. + +The function retrieves contours from the binary image using the algorithm @cite Suzuki85 +and calculates their hierarchy. +The contours are a useful tool for shape analysis and object detection and recognition. +See squares.cpp in the OpenCV sample directory. 
+ +@note Function textual ID is "org.opencv.imgproc.shape.findContoursH" + +@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero +pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold , +#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one. +If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer +image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL -- @ref CV_32SC1 supports only. +@param mode Contour retrieval mode, see #RetrievalModes +@param method Contour approximation method, see #ContourApproximationModes +@param offset Optional offset by which every contour point is shifted. This is useful if the +contours are extracted from the image ROI and then they should be analyzed in the whole image +context. + +@return + - GArray of detected contours. Each contour is stored as a GArray of points. + - Optional output GArray of cv::Vec4i, containing information about the image topology. +It has as many elements as the number of contours. For each i-th contour contours[i], the elements +hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based +indices in contours of the next and previous contours at the same hierarchical level, the first +child contour and the parent contour, respectively. If for the contour i there are no next, +previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative. 
+ */ +GAPI_EXPORTS std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset); + +// FIXME oc: make default value offset = Point() +/** @overload +@note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset" + */ +GAPI_EXPORTS std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method); + +/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels +of gray-scale image. + +The function calculates and returns the minimal up-right bounding rectangle for the specified +point set or non-zero pixels of gray-scale image. + +@note + - Function textual ID is "org.opencv.imgproc.shape.boundingRectMat" + - In case of a 2D points' set given, Mat should be 2-dimensional, have a single row or column +if there are 2 channels, or have 2 columns if there is a single channel. Mat should have either +@ref CV_32S or @ref CV_32F depth + +@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F +2D points stored in Mat. + */ +GAPI_EXPORTS GOpaque boundingRect(const GMat& src); + +/** @overload + +Calculates the up-right bounding rectangle of a point set. + +@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S" + +@param src Input 2D point set, stored in std::vector. + */ +GAPI_EXPORTS GOpaque boundingRect(const GArray& src); + +/** @overload + +Calculates the up-right bounding rectangle of a point set. + +@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F" + +@param src Input 2D point set, stored in std::vector. + */ +GAPI_EXPORTS GOpaque boundingRect(const GArray& src); + +/** @brief Fits a line to a 2D point set. 
+ +The function fits a line to a 2D point set by minimizing \f$\sum_i \rho(r_i)\f$ where +\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance +function, one of the following: +- DIST_L2 +\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f] +- DIST_L1 +\f[\rho (r) = r\f] +- DIST_L12 +\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f] +- DIST_FAIR +\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f] +- DIST_WELSCH +\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f] +- DIST_HUBER +\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f] + +The algorithm is based on the M-estimator ( ) technique +that iteratively fits the line using the weighted least-squares algorithm. After each iteration the +weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ . + +@note + - Function textual ID is "org.opencv.imgproc.shape.fitLine2DMat" + - In case of an N-dimentional points' set given, Mat should be 2-dimensional, have a single row +or column if there are N channels, or have N columns if there is a single channel. + +@param src Input set of 2D points stored in one of possible containers: Mat, +std::vector, std::vector, std::vector. +@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER +and @ref DIST_C are not suppored. +@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value +is chosen. +@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the +line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen. +@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps. 
+If it is 0, a default value is chosen. + +@return Output line parameters: a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0), +where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line. + */ +GAPI_EXPORTS GOpaque fitLine2D(const GMat& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32S" + + */ +GAPI_EXPORTS GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32F" + + */ +GAPI_EXPORTS GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector64F" + + */ +GAPI_EXPORTS GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @brief Fits a line to a 3D point set. 
+ +The function fits a line to a 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where +\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance +function, one of the following: +- DIST_L2 +\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f] +- DIST_L1 +\f[\rho (r) = r\f] +- DIST_L12 +\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f] +- DIST_FAIR +\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f] +- DIST_WELSCH +\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f] +- DIST_HUBER +\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f] + +The algorithm is based on the M-estimator ( ) technique +that iteratively fits the line using the weighted least-squares algorithm. After each iteration the +weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ . + +@note + - Function textual ID is "org.opencv.imgproc.shape.fitLine3DMat" + - In case of an N-dimentional points' set given, Mat should be 2-dimensional, have a single row +or column if there are N channels, or have N columns if there is a single channel. + +@param src Input set of 3D points stored in one of possible containers: Mat, +std::vector, std::vector, std::vector. +@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER +and @ref DIST_C are not suppored. +@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value +is chosen. +@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the +line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen. +@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps. 
+If it is 0, a default value is chosen. + +@return Output line parameters: a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0), +where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on +the line. + */ +GAPI_EXPORTS GOpaque fitLine3D(const GMat& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32S" + + */ +GAPI_EXPORTS GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32F" + + */ +GAPI_EXPORTS GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector64F" + + */ +GAPI_EXPORTS GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +//! @} gapi_shape //! @addtogroup gapi_colorconvert //! @{ +/** @brief Converts an image from BGR color space to RGB color space. + +The function converts an input image from BGR color space to RGB. +The conventional ranges for B, G, and R channel values are 0 to 255. + +Output image is 8-bit unsigned 3-channel image @ref CV_8UC3. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2rgb" + +@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3. +@sa RGB2BGR +*/ +GAPI_EXPORTS GMat BGR2RGB(const GMat& src); + /** @brief Converts an image from RGB color space to gray-scaled. The conventional ranges for R, G, and B channel values are 0 to 255. 
Resulting gray color value computed as @@ -826,7 +1353,7 @@ Resulting gray color value computed as @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC1. @sa RGB2YUV */ -GAPI_EXPORTS GMat RGB2Gray(const GMat& src); +GAPI_EXPORTS_W GMat RGB2Gray(const GMat& src); /** @overload Resulting gray color value computed as @@ -871,6 +1398,70 @@ Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3. */ GAPI_EXPORTS GMat RGB2YUV(const GMat& src); +/** @brief Converts an image from BGR color space to I420 color space. + +The function converts an input image from BGR color space to I420. +The conventional ranges for R, G, and B channel values are 0 to 255. + +Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1. +Width of I420 output image must be the same as width of input image. +Height of I420 output image must be equal 3/2 from height of input image. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2i420" + +@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3. +@sa I4202BGR +*/ +GAPI_EXPORTS GMat BGR2I420(const GMat& src); + +/** @brief Converts an image from RGB color space to I420 color space. + +The function converts an input image from RGB color space to I420. +The conventional ranges for R, G, and B channel values are 0 to 255. + +Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1. +Width of I420 output image must be the same as width of input image. +Height of I420 output image must be equal 3/2 from height of input image. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2i420" + +@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3. +@sa I4202RGB +*/ +GAPI_EXPORTS GMat RGB2I420(const GMat& src); + +/** @brief Converts an image from I420 color space to BGR color space. + +The function converts an input image from I420 color space to BGR. +The conventional ranges for B, G, and R channel values are 0 to 255. 
+ +Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3. +Width of BGR output image must be the same as width of input image. +Height of BGR output image must be equal 2/3 from height of input image. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202bgr" + +@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1. +@sa BGR2I420 +*/ +GAPI_EXPORTS GMat I4202BGR(const GMat& src); + +/** @brief Converts an image from I420 color space to BGR color space. + +The function converts an input image from I420 color space to BGR. +The conventional ranges for B, G, and R channel values are 0 to 255. + +Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3. +Width of RGB output image must be the same as width of input image. +Height of RGB output image must be equal 2/3 from height of input image. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202rgb" + +@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1. +@sa RGB2I420 +*/ +GAPI_EXPORTS GMat I4202RGB(const GMat& src); + /** @brief Converts an image from BGR color space to LUV color space. The function converts an input image from BGR color space to LUV. diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer.hpp index 50086dd8488..8e3baedb79e 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer.hpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2019 Intel Corporation +// Copyright (C) 2019-2020 Intel Corporation #ifndef OPENCV_GAPI_INFER_HPP @@ -16,6 +16,7 @@ #include // tuple #include // is_same, false_type +#include // all_satisfy #include // any<> #include // GKernelType[M], GBackend #include // GArg @@ -27,40 +28,54 @@ namespace cv { template class GNetworkType; namespace detail { - template - struct valid_infer2_types; - // Terminal case 1 (50/50 success) - template - struct valid_infer2_types< std::tuple, std::tuple > { - // By default, Nets are limited to GMat argument types only - // for infer2, every GMat argument may translate to either - // GArray or GArray. GArray<> part is stripped - // already at this point. - static constexpr const auto value = - std::is_same::type, cv::GMat>::value - || std::is_same::type, cv::Rect>::value; - }; +// Infer /////////////////////////////////////////////////////////////////////// +template +struct accepted_infer_types { + static constexpr const auto value = + std::is_same::type, cv::GMat>::value + || std::is_same::type, cv::GFrame>::value; +}; - // Terminal case 2 (100% failure) - template - struct valid_infer2_types< std::tuple<>, std::tuple > - : public std::false_type { - }; +template +using valid_infer_types = all_satisfy; - // Terminal case 3 (100% failure) - template - struct valid_infer2_types< std::tuple, std::tuple<> > - : public std::false_type { - }; +// Infer2 ////////////////////////////////////////////////////////////////////// - // Recursion -- generic - template - struct valid_infer2_types< std::tuple, std::tuple > { - static constexpr const auto value = - valid_infer2_types< std::tuple, std::tuple >::value - && valid_infer2_types< std::tuple, std::tuple >::value; - }; +template +struct valid_infer2_types; + +// Terminal case 1 (50/50 success) +template +struct valid_infer2_types< std::tuple, std::tuple > { + // By default, Nets are limited to GMat argument types only + // for infer2, every GMat argument may translate to 
either + // GArray or GArray. GArray<> part is stripped + // already at this point. + static constexpr const auto value = + std::is_same::type, cv::GMat>::value + || std::is_same::type, cv::Rect>::value; +}; + +// Terminal case 2 (100% failure) +template +struct valid_infer2_types< std::tuple<>, std::tuple > + : public std::false_type { +}; + +// Terminal case 3 (100% failure) +template +struct valid_infer2_types< std::tuple, std::tuple<> > + : public std::false_type { +}; + +// Recursion -- generic +template +struct valid_infer2_types< std::tuple, std::tuple > { + static constexpr const auto value = + valid_infer2_types< std::tuple, std::tuple >::value + && valid_infer2_types< std::tuple, std::tuple >::value; +}; } // namespace detail // TODO: maybe tuple_wrap_helper from util.hpp may help with this. @@ -76,7 +91,6 @@ public: using API = std::function; using ResultL = std::tuple< cv::GArray... >; - using APIList = std::function, Args...)>; }; // Single-return-value network definition (specialized base class) @@ -91,17 +105,48 @@ public: using API = std::function; using ResultL = cv::GArray; - using APIList = std::function, Args...)>; +}; + +// InferAPI: Accepts either GMat or GFrame for very individual network's input +template +struct InferAPI { + using type = typename std::enable_if + < detail::valid_infer_types::value + && std::tuple_size::value == sizeof...(Ts) + , std::function + >::type; +}; + +// InferAPIRoi: Accepts a rectangle and either GMat or GFrame +template +struct InferAPIRoi { + using type = typename std::enable_if + < detail::valid_infer_types::value + && std::tuple_size::value == 1u + , std::function, T)> + >::type; +}; + +// InferAPIList: Accepts a list of rectangles and list of GMat/GFrames; +// crops every input. 
+template +struct InferAPIList { + using type = typename std::enable_if + < detail::valid_infer_types::value + && std::tuple_size::value == sizeof...(Ts) + , std::function, Ts...)> + >::type; }; // APIList2 is also template to allow different calling options // (GArray vs GArray per input) -template +template struct InferAPIList2 { using type = typename std::enable_if - < cv::detail::valid_infer2_types< typename Net::InArgs + < detail::valid_infer_types::value && + cv::detail::valid_infer2_types< typename Net::InArgs , std::tuple >::value, - std::function...)> + std::function...)> >::type; }; @@ -114,22 +159,75 @@ struct InferAPIList2 { // a particular backend, not by a network itself. struct GInferBase { static constexpr const char * id() { - return "org.opencv.dnn.infer"; // Universal stub + return "org.opencv.dnn.infer"; // Universal stub } static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { - return GMetaArgs{}; // One more universal stub + return GMetaArgs{}; // One more universal stub } }; +// Struct stores network input/output names. +// Used by infer +struct InOutInfo +{ + std::vector in_names; + std::vector out_names; +}; + +/** + * @{ + * @brief G-API object used to collect network inputs + */ +class GAPI_EXPORTS_W_SIMPLE GInferInputs +{ +using Map = std::unordered_map; +public: + GAPI_WRAP GInferInputs(); + GAPI_WRAP void setInput(const std::string& name, const cv::GMat& value); + + cv::GMat& operator[](const std::string& name); + const Map& getBlobs() const; + +private: + std::shared_ptr in_blobs; +}; +/** @} */ + +/** + * @{ + * @brief G-API object used to collect network outputs + */ +struct GAPI_EXPORTS_W_SIMPLE GInferOutputs +{ +public: + GAPI_WRAP GInferOutputs() = default; + GInferOutputs(std::shared_ptr call); + GAPI_WRAP cv::GMat at(const std::string& name); + +private: + struct Priv; + std::shared_ptr m_priv; +}; +/** @} */ +// Base "InferROI" kernel. +// All notes from "Infer" kernel apply here as well. 
+struct GInferROIBase { + static constexpr const char * id() { + return "org.opencv.dnn.infer-roi"; // Universal stub + } + static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { + return GMetaArgs{}; // One more universal stub + } +}; // Base "Infer list" kernel. // All notes from "Infer" kernel apply here as well. struct GInferListBase { static constexpr const char * id() { - return "org.opencv.dnn.infer-roi"; // Universal stub + return "org.opencv.dnn.infer-roi-list-1"; // Universal stub } static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { - return GMetaArgs{}; // One more universal stub + return GMetaArgs{}; // One more universal stub } }; @@ -137,33 +235,46 @@ struct GInferListBase { // All notes from "Infer" kernel apply here as well. struct GInferList2Base { static constexpr const char * id() { - return "org.opencv.dnn.infer-roi-list"; // Universal stub + return "org.opencv.dnn.infer-roi-list-2"; // Universal stub } static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { - return GMetaArgs{}; // One more universal stub + return GMetaArgs{}; // One more universal stub } }; // A generic inference kernel. API (::on()) is fully defined by the Net // template parameter. // Acts as a regular kernel in graph (via KernelTypeMedium). -template +template struct GInfer final : public GInferBase - , public detail::KernelTypeMedium< GInfer - , typename Net::API > { + , public detail::KernelTypeMedium< GInfer + , typename InferAPI::type > { using GInferBase::getOutMeta; // FIXME: name lookup conflict workaround? static constexpr const char* tag() { return Net::tag(); } }; +// A specific roi-inference kernel. API (::on()) is fixed here and +// verified against Net. +template +struct GInferROI final + : public GInferROIBase + , public detail::KernelTypeMedium< GInferROI + , typename InferAPIRoi::type > { + using GInferROIBase::getOutMeta; // FIXME: name lookup conflict workaround? 
+ + static constexpr const char* tag() { return Net::tag(); } +}; + + // A generic roi-list inference kernel. API (::on()) is derived from // the Net template parameter (see more in infer<> overload). -template +template struct GInferList final : public GInferListBase - , public detail::KernelTypeMedium< GInferList - , typename Net::APIList > { + , public detail::KernelTypeMedium< GInferList + , typename InferAPIList::type > { using GInferListBase::getOutMeta; // FIXME: name lookup conflict workaround? static constexpr const char* tag() { return Net::tag(); } @@ -174,11 +285,11 @@ struct GInferList final // overload). // Takes an extra variadic template list to reflect how this network // was called (with Rects or GMats as array parameters) -template +template struct GInferList2 final : public GInferList2Base - , public detail::KernelTypeMedium< GInferList2 - , typename InferAPIList2::type > { + , public detail::KernelTypeMedium< GInferList2 + , typename InferAPIList2::type > { using GInferList2Base::getOutMeta; // FIXME: name lookup conflict workaround? static constexpr const char* tag() { return Net::tag(); } @@ -195,6 +306,23 @@ struct GInferList2 final namespace cv { namespace gapi { +/** @brief Calculates response for the specified network (template + * parameter) for the specified region in the source image. + * Currently expects a single-input network only. + * + * @tparam A network type defined with G_API_NET() macro. + * @param in input image where to take ROI from. + * @param roi an object describing the region of interest + * in the source image. May be calculated in the same graph dynamically. + * @return an object of return type as defined in G_API_NET(). + * If a network has multiple return values (defined with a tuple), a tuple of + * objects of appropriate type is returned. 
+ * @sa G_API_NET() + */ +template +typename Net::Result infer(cv::GOpaque roi, T in) { + return GInferROI::on(roi, in); +} /** @brief Calculates responses for the specified network (template * parameter) for every region in the source image. @@ -211,7 +339,7 @@ namespace gapi { */ template typename Net::ResultL infer(cv::GArray roi, Args&&... args) { - return GInferList::on(roi, std::forward(args)...); + return GInferList::on(roi, std::forward(args)...); } /** @brief Calculates responses for the specified network (template @@ -231,11 +359,12 @@ typename Net::ResultL infer(cv::GArray roi, Args&&... args) { * GArray<> objects is returned with the appropriate types inside. * @sa G_API_NET() */ -template -typename Net::ResultL infer2(cv::GMat image, cv::GArray... args) { + +template +typename Net::ResultL infer2(T image, cv::GArray... args) { // FIXME: Declared as "2" because in the current form it steals // overloads from the regular infer - return GInferList2::on(image, args...); + return GInferList2::on(image, args...); } /** @@ -251,9 +380,54 @@ typename Net::ResultL infer2(cv::GMat image, cv::GArray... args) { */ template typename Net::Result infer(Args&&... 
args) { - return GInfer::on(std::forward(args)...); + return GInfer::on(std::forward(args)...); } +/** + * @brief Special network type + */ +struct Generic { }; + +/** + * @brief Calculates response for generic network + * + * @param tag a network tag + * @param inputs networks's inputs + * @return a GInferOutputs + */ +template GInferOutputs +infer(const std::string& tag, const GInferInputs& inputs) +{ + std::vector input_args; + std::vector input_names; + + const auto& blobs = inputs.getBlobs(); + for (auto&& p : blobs) + { + input_names.push_back(p.first); + input_args.emplace_back(p.second); + } + + GKinds kinds(blobs.size(), cv::detail::OpaqueKind::CV_MAT); + auto call = std::make_shared(GKernel{ + GInferBase::id(), + tag, + GInferBase::getOutMeta, + {}, // outShape will be filled later + std::move(kinds), + {}, // outCtors will be filled later + }); + + call->setArgs(std::move(input_args)); + call->params() = InOutInfo{input_names, {}}; + + return GInferOutputs{std::move(call)}; +} + +GAPI_EXPORTS_W inline GInferOutputs infer(const String& name, const GInferInputs& inputs) +{ + return infer(name, inputs); +} } // namespace gapi } // namespace cv @@ -283,9 +457,9 @@ struct GAPI_EXPORTS GNetParam { * * @sa cv::gapi::networks */ -struct GAPI_EXPORTS GNetPackage { - GNetPackage() : GNetPackage({}) {} - explicit GNetPackage(std::initializer_list &&ii); +struct GAPI_EXPORTS_W_SIMPLE GNetPackage { + GAPI_WRAP GNetPackage() = default; + explicit GNetPackage(std::initializer_list ii); std::vector backends() const; std::vector networks; }; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp new file mode 100644 index 00000000000..fdd4128b1ae --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp @@ -0,0 +1,56 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_INFER_BINDINGS_IE_HPP +#define OPENCV_GAPI_INFER_BINDINGS_IE_HPP + +#include +#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS +#include // GKernelPackage +#include // Params + +#include + +namespace cv { +namespace gapi { +namespace ie { + +// NB: Used by python wrapper +// This class can be marked as SIMPLE, because it's implemented as pimpl +class GAPI_EXPORTS_W_SIMPLE PyParams { +public: + PyParams() = default; + + PyParams(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device); + + PyParams(const std::string &tag, + const std::string &model, + const std::string &device); + + GBackend backend() const; + std::string tag() const; + cv::util::any params() const; + +private: + std::shared_ptr> m_priv; +}; + +GAPI_EXPORTS_W PyParams params(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device); + +GAPI_EXPORTS_W PyParams params(const std::string &tag, + const std::string &model, + const std::string &device); +} // namespace ie +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_INFER_BINDINGS_IE_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/ie.hpp index c6d7f272a8c..53e31fbb099 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/ie.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/ie.hpp @@ -11,12 +11,14 @@ #include #include #include // tuple, tuple_size +#include #include #include #include // GAPI_EXPORTS #include // GKernelPackage +#include // Generic namespace cv { namespace gapi { @@ -41,6 +43,8 @@ enum class TraitAs: 
int IMAGE //!< G-API traits an associated cv::Mat as an image so creates an "image" blob (NCHW/NHWC, etc) }; +using IEConfig = std::map; + namespace detail { struct ParamDesc { std::string model_path; @@ -58,6 +62,11 @@ namespace detail { // (e.g. topology's partial execution) std::size_t num_in; // How many inputs are defined in the operation std::size_t num_out; // How many outputs are defined in the operation + + enum class Kind { Load, Import }; + Kind kind; + bool is_generic; + IEConfig config; }; } // namespace detail @@ -80,7 +89,19 @@ public: : desc{ model, weights, device, {}, {}, {} , std::tuple_size::value // num_in , std::tuple_size::value // num_out - } { + , detail::ParamDesc::Kind::Load + , false + , {}} { + }; + + Params(const std::string &model, + const std::string &device) + : desc{ model, {}, device, {}, {}, {} + , std::tuple_size::value // num_in + , std::tuple_size::value // num_out + , detail::ParamDesc::Kind::Import + , false + , {}} { }; Params& cfgInputLayers(const typename PortCfg::In &ll) { @@ -106,18 +127,65 @@ public: return *this; } + Params& pluginConfig(IEConfig&& cfg) { + desc.config = std::move(cfg); + return *this; + } + + Params& pluginConfig(const IEConfig& cfg) { + desc.config = cfg; + return *this; + } + // BEGIN(G-API's network parametrization API) - GBackend backend() const { return cv::gapi::ie::backend(); } - std::string tag() const { return Net::tag(); } - cv::util::any params() const { return { desc }; } + GBackend backend() const { return cv::gapi::ie::backend(); } + std::string tag() const { return Net::tag(); } + cv::util::any params() const { return { desc }; } // END(G-API's network parametrization API) protected: detail::ParamDesc desc; }; +template<> +class Params { +public: + Params(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device) + : desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true, {}}, m_tag(tag) { + }; + + 
Params(const std::string &tag, + const std::string &model, + const std::string &device) + : desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true, {}}, m_tag(tag) { + }; + + Params& pluginConfig(IEConfig&& cfg) { + desc.config = std::move(cfg); + return *this; + } + + Params& pluginConfig(const IEConfig& cfg) { + desc.config = cfg; + return *this; + } + + // BEGIN(G-API's network parametrization API) + GBackend backend() const { return cv::gapi::ie::backend(); } + std::string tag() const { return m_tag; } + cv::util::any params() const { return { desc }; } + // END(G-API's network parametrization API) + +protected: + detail::ParamDesc desc; + std::string m_tag; +}; + } // namespace ie } // namespace gapi } // namespace cv -#endif // OPENCV_GAPI_INFER_HPP +#endif // OPENCV_GAPI_INFER_IE_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/onnx.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/onnx.hpp new file mode 100644 index 00000000000..d61ceb3dca9 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/onnx.hpp @@ -0,0 +1,138 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_INFER_ONNX_HPP +#define OPENCV_GAPI_INFER_ONNX_HPP + +#include +#include +#include +#include // tuple, tuple_size + +#include +#include + +#include // GAPI_EXPORTS +#include // GKernelPackage + +namespace cv { +namespace gapi { +namespace onnx { + +GAPI_EXPORTS cv::gapi::GBackend backend(); + +enum class TraitAs: int { + TENSOR, //!< G-API traits an associated cv::Mat as a raw tensor + // and passes dimensions as-is + IMAGE //!< G-API traits an associated cv::Mat as an image so + // creates an "image" blob (NCHW/NHWC, etc) +}; + +using PostProc = std::function &, + std::unordered_map &)>; + + +namespace detail { +struct ParamDesc { + std::string model_path; + + // NB: nun_* may differ from topology's real input/output port numbers + // (e.g. topology's partial execution) + std::size_t num_in; // How many inputs are defined in the operation + std::size_t num_out; // How many outputs are defined in the operation + + // NB: Here order follows the `Net` API + std::vector input_names; + std::vector output_names; + + using ConstInput = std::pair; + std::unordered_map const_inputs; + + std::vector mean; + std::vector stdev; + + std::vector out_metas; + PostProc custom_post_proc; + + std::vector normalize; +}; +} // namespace detail + +template +struct PortCfg { + using In = std::array + < std::string + , std::tuple_size::value >; + using Out = std::array + < std::string + , std::tuple_size::value >; + using NormCoefs = std::array + < cv::Scalar + , std::tuple_size::value >; + using Normalize = std::array + < bool + , std::tuple_size::value >; +}; + +template class Params { +public: + Params(const std::string &model) { + desc.model_path = model; + desc.num_in = std::tuple_size::value; + desc.num_out = std::tuple_size::value; + }; + + // BEGIN(G-API's network parametrization API) + GBackend backend() const { return cv::gapi::onnx::backend(); } + std::string tag() const { return Net::tag(); } + cv::util::any 
params() const { return { desc }; } + // END(G-API's network parametrization API) + + Params& cfgInputLayers(const typename PortCfg::In &ll) { + desc.input_names.assign(ll.begin(), ll.end()); + return *this; + } + + Params& cfgOutputLayers(const typename PortCfg::Out &ll) { + desc.output_names.assign(ll.begin(), ll.end()); + return *this; + } + + Params& constInput(const std::string &layer_name, + const cv::Mat &data, + TraitAs hint = TraitAs::TENSOR) { + desc.const_inputs[layer_name] = {data, hint}; + return *this; + } + + Params& cfgMeanStd(const typename PortCfg::NormCoefs &m, + const typename PortCfg::NormCoefs &s) { + desc.mean.assign(m.begin(), m.end()); + desc.stdev.assign(s.begin(), s.end()); + return *this; + } + + Params& cfgPostProc(const std::vector &outs, + const PostProc &pp) { + desc.out_metas = outs; + desc.custom_post_proc = pp; + return *this; + } + + Params& cfgNormalize(const typename PortCfg::Normalize &n) { + desc.normalize.assign(n.begin(), n.end()); + return *this; + } + +protected: + detail::ParamDesc desc; +}; + +} // namespace onnx +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_INFER_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/parsers.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/parsers.hpp index c3488f57995..15742c6e55c 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/parsers.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/infer/parsers.hpp @@ -122,4 +122,16 @@ GAPI_EXPORTS std::tuple, GArray> parseYolo(const GMat& in, } // namespace gapi } // namespace cv +// Reimport parseSSD & parseYolo under their initial namespace +namespace cv { +namespace gapi { +namespace streaming { + +using cv::gapi::parseSSD; +using cv::gapi::parseYolo; + +} // namespace streaming +} // namespace gapi +} // namespace cv + #endif // OPENCV_GAPI_PARSERS_HPP diff --git 
a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/media.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/media.hpp new file mode 100644 index 00000000000..f27cb809139 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/media.hpp @@ -0,0 +1,73 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_MEDIA_HPP +#define OPENCV_GAPI_MEDIA_HPP + +#include // unique_ptr<>, shared_ptr<> +#include // array<> +#include // function<> +#include // forward<>() + +#include + +namespace cv { + +class GAPI_EXPORTS MediaFrame { +public: + enum class Access { R, W }; + class IAdapter; + class View; + using AdapterPtr = std::unique_ptr; + + MediaFrame(); + explicit MediaFrame(AdapterPtr &&); + template static cv::MediaFrame Create(Args&&...); + + View access(Access) const; + cv::GFrameDesc desc() const; + +private: + struct Priv; + std::shared_ptr m; +}; + +template +inline cv::MediaFrame cv::MediaFrame::Create(Args&&... 
args) { + std::unique_ptr ptr(new T(std::forward(args)...)); + return cv::MediaFrame(std::move(ptr)); +} + +class GAPI_EXPORTS MediaFrame::View final { +public: + static constexpr const size_t MAX_PLANES = 4; + using Ptrs = std::array; + using Strides = std::array; // in bytes + using Callback = std::function; + + View(Ptrs&& ptrs, Strides&& strs, Callback &&cb = [](){}); + View(const View&) = delete; + View(View&&) = default; + View& operator = (const View&) = delete; + ~View(); + + Ptrs ptr; + Strides stride; + +private: + Callback m_cb; +}; + +class GAPI_EXPORTS MediaFrame::IAdapter { +public: + virtual ~IAdapter() = 0; + virtual cv::GFrameDesc meta() const = 0; + virtual MediaFrame::View access(MediaFrame::Access) = 0; +}; + +} //namespace cv + +#endif // OPENCV_GAPI_MEDIA_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp index ee363c05a06..8ec388d588b 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/ocl/goclkernel.hpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018-2019 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #ifndef OPENCV_GAPI_GOCLKERNEL_HPP @@ -75,7 +75,7 @@ public: protected: detail::VectorRef& outVecRef(int output); - detail::VectorRef& outOpaqueRef(int output); + detail::OpaqueRef& outOpaqueRef(int output); std::vector m_args; std::unordered_map m_results; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/opencv_includes.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/opencv_includes.hpp index 5f25fe4af76..08b2d6ed02d 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/opencv_includes.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/opencv_includes.hpp @@ -21,11 +21,12 @@ # include // replacement of cv's structures: namespace cv { - using Rect = gapi::own::Rect; - using Size = gapi::own::Size; - using Point = gapi::own::Point; - using Scalar = gapi::own::Scalar; - using Mat = gapi::own::Mat; + using Rect = gapi::own::Rect; + using Size = gapi::own::Size; + using Point = gapi::own::Point; + using Point2f = gapi::own::Point2f; + using Scalar = gapi::own::Scalar; + using Mat = gapi::own::Mat; } // namespace cv #endif // !defined(GAPI_STANDALONE) diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/assert.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/assert.hpp index d0e0f1c3ff2..d50543fdac9 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/assert.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/assert.hpp @@ -2,16 +2,28 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #ifndef OPENCV_GAPI_OWN_ASSERT_HPP #define OPENCV_GAPI_OWN_ASSERT_HPP +#include + +#define GAPI_DbgAssertNoOp(expr) { \ + constexpr bool _assert_tmp = false && (expr); \ + cv::util::suppress_unused_warning(_assert_tmp); \ +} + #if !defined(GAPI_STANDALONE) #include #define GAPI_Assert CV_Assert -#define GAPI_DbgAssert CV_DbgAssert + +#if defined _DEBUG || defined CV_STATIC_ANALYSIS +# define GAPI_DbgAssert CV_DbgAssert +#else +# define GAPI_DbgAssert(expr) GAPI_DbgAssertNoOp(expr) +#endif #else #include @@ -33,7 +45,7 @@ namespace detail #ifdef NDEBUG -# define GAPI_DbgAssert(expr) +# define GAPI_DbgAssert(expr) GAPI_DbgAssertNoOp(expr) #else # define GAPI_DbgAssert(expr) GAPI_Assert(expr) #endif diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/mat.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/mat.hpp index a10985866bd..ce9c0bf3623 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/mat.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/mat.hpp @@ -254,6 +254,18 @@ namespace cv { namespace gapi { namespace own { *this = std::move(tmp); } + /** @brief Creates a full copy of the matrix and the underlying data. + + The method creates a full copy of the matrix. The original step[] is not taken into account. + So, the copy has a continuous buffer occupying total() * elemSize() bytes. + */ + Mat clone() const + { + Mat m; + copyTo(m); + return m; + } + /** @brief Copies the matrix to another one. The method copies the matrix data to another matrix. 
Before copying the data, the method invokes : diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/types.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/types.hpp index 20445ee0fd4..c77a62ca537 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/types.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/own/types.hpp @@ -28,6 +28,16 @@ public: int y = 0; }; +class Point2f +{ +public: + Point2f() = default; + Point2f(float _x, float _y) : x(_x), y(_y) {}; + + float x = 0.f; + float y = 0.f; +}; + class Rect { public: diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/render/render_types.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/render/render_types.hpp index 08b14d1ddd3..ca403be361e 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/render/render_types.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/render/render_types.hpp @@ -252,7 +252,7 @@ struct Mosaic { } - Mosaic() = default; + Mosaic() : cellSz(0), decim(0) {} /*@{*/ cv::Rect mos; //!< Coordinates of the mosaic diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/rmat.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/rmat.hpp index 2fbdf038f76..f50bd08b650 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/rmat.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/rmat.hpp @@ -8,6 +8,17 @@ #define OPENCV_GAPI_RMAT_HPP #include +#include + +// Forward declaration +namespace cv { +namespace gapi { +namespace s11n { + struct IOStream; + struct IIStream; +} // namespace s11n +} // namespace gapi +} // namespace cv namespace cv { @@ -31,7 +42,7 @@ namespace cv { // performCalculations(in_view, out_view); // // data from out_view is transferred to the device when out_view is 
destroyed // } -class RMat +class GAPI_EXPORTS RMat { public: // A lightweight wrapper on image data: @@ -39,43 +50,50 @@ public: // - Doesn't implement copy semantics (it's assumed that a view is created each time // wrapped data is being accessed); // - Has an optional callback which is called when the view is destroyed. - class View + class GAPI_EXPORTS View { public: using DestroyCallback = std::function; + using stepsT = std::vector; View() = default; - View(const GMatDesc& desc, uchar* data, size_t step = 0u, DestroyCallback&& cb = nullptr) - : m_desc(desc), m_data(data), m_step(step == 0u ? elemSize()*cols() : step), m_cb(cb) - {} + View(const GMatDesc& desc, uchar* data, const stepsT& steps = {}, DestroyCallback&& cb = nullptr); + View(const GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb = nullptr); View(const View&) = delete; - View(View&&) = default; View& operator=(const View&) = delete; - View& operator=(View&&) = default; + View(View&&) = default; + View& operator=(View&& v); ~View() { if (m_cb) m_cb(); } cv::Size size() const { return m_desc.size; } const std::vector& dims() const { return m_desc.dims; } int cols() const { return m_desc.size.width; } int rows() const { return m_desc.size.height; } - int type() const { return CV_MAKE_TYPE(depth(), chan()); } + int type() const; int depth() const { return m_desc.depth; } int chan() const { return m_desc.chan; } size_t elemSize() const { return CV_ELEM_SIZE(type()); } - template T* ptr(int y = 0, int x = 0) { - return reinterpret_cast(m_data + m_step*y + x*CV_ELEM_SIZE(type())); + template T* ptr(int y = 0) { + return reinterpret_cast(m_data + step()*y); } - template const T* ptr(int y = 0, int x = 0) const { - return reinterpret_cast(m_data + m_step*y + x*CV_ELEM_SIZE(type())); + template const T* ptr(int y = 0) const { + return reinterpret_cast(m_data + step()*y); } - size_t step() const { return m_step; } + template T* ptr(int y, int x) { + return reinterpret_cast(m_data + step()*y + 
step(1)*x); + } + template const T* ptr(int y, int x) const { + return reinterpret_cast(m_data + step()*y + step(1)*x); + } + size_t step(size_t i = 0) const { GAPI_DbgAssert(i; @@ -112,6 +136,10 @@ public: return dynamic_cast(m_adapter.get()); } + void serialize(cv::gapi::s11n::IOStream& os) const { + m_adapter->serialize(os); + } + private: AdapterP m_adapter = nullptr; }; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/s11n.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/s11n.hpp index c669b732a2c..0e2c4c239b9 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/s11n.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/s11n.hpp @@ -8,21 +8,27 @@ #define OPENCV_GAPI_S11N_HPP #include +#include +#include +#include #include +#include namespace cv { namespace gapi { namespace detail { GAPI_EXPORTS cv::GComputation getGraph(const std::vector &p); -} // namespace detail -namespace detail { GAPI_EXPORTS cv::GMetaArgs getMetaArgs(const std::vector &p); -} // namespace detail -namespace detail { GAPI_EXPORTS cv::GRunArgs getRunArgs(const std::vector &p); + + template + cv::GCompileArgs getCompileArgs(const std::vector &p); + + template + cv::GRunArgs getRunArgsWithRMats(const std::vector &p); } // namespace detail GAPI_EXPORTS std::vector serialize(const cv::GComputation &c); @@ -33,6 +39,7 @@ T deserialize(const std::vector &p); //} //ananymous namespace +GAPI_EXPORTS std::vector serialize(const cv::GCompileArgs&); GAPI_EXPORTS std::vector serialize(const cv::GMetaArgs&); GAPI_EXPORTS std::vector serialize(const cv::GRunArgs&); @@ -51,7 +58,307 @@ cv::GRunArgs deserialize(const std::vector &p) { return detail::getRunArgs(p); } +template inline +typename std::enable_if::value, GCompileArgs>:: +type deserialize(const std::vector &p) { + return detail::getCompileArgs(p); +} +template inline +typename std::enable_if::value, GRunArgs>:: +type deserialize(const 
std::vector &p) { + return detail::getRunArgsWithRMats(p); +} +} // namespace gapi +} // namespace cv + +namespace cv { +namespace gapi { +namespace s11n { +struct GAPI_EXPORTS IOStream { + virtual ~IOStream() = default; + // Define the native support for basic C++ types at the API level: + virtual IOStream& operator<< (bool) = 0; + virtual IOStream& operator<< (char) = 0; + virtual IOStream& operator<< (unsigned char) = 0; + virtual IOStream& operator<< (short) = 0; + virtual IOStream& operator<< (unsigned short) = 0; + virtual IOStream& operator<< (int) = 0; + virtual IOStream& operator<< (uint32_t) = 0; + virtual IOStream& operator<< (uint64_t) = 0; + virtual IOStream& operator<< (float) = 0; + virtual IOStream& operator<< (double) = 0; + virtual IOStream& operator<< (const std::string&) = 0; +}; + +struct GAPI_EXPORTS IIStream { + virtual ~IIStream() = default; + virtual IIStream& operator>> (bool &) = 0; + virtual IIStream& operator>> (std::vector::reference) = 0; + virtual IIStream& operator>> (char &) = 0; + virtual IIStream& operator>> (unsigned char &) = 0; + virtual IIStream& operator>> (short &) = 0; + virtual IIStream& operator>> (unsigned short &) = 0; + virtual IIStream& operator>> (int &) = 0; + virtual IIStream& operator>> (float &) = 0; + virtual IIStream& operator>> (double &) = 0; + virtual IIStream& operator >> (uint32_t &) = 0; + virtual IIStream& operator >> (uint64_t &) = 0; + virtual IIStream& operator>> (std::string &) = 0; +}; + +namespace detail { +GAPI_EXPORTS std::unique_ptr getInStream(const std::vector &p); +} // namespace detail + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +// S11N operators +// Note: operators for basic types are defined in IIStream/IOStream + +// OpenCV types //////////////////////////////////////////////////////////////// + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Point &pt); 
+GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Point &pt); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Point2f &pt); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Point2f &pt); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Size &sz); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Size &sz); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Rect &rc); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Rect &rc); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Scalar &s); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Scalar &s); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Mat &m); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Mat &m); + +// FIXME: for GRunArgs serailization +#if !defined(GAPI_STANDALONE) +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::UMat &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::UMat &); +#endif // !defined(GAPI_STANDALONE) + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::RMat &r); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::RMat &r); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::IStreamSource::Ptr &); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::VectorRef &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::VectorRef &); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::OpaqueRef &); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::MediaFrame &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::MediaFrame &); + +// Generic STL types //////////////////////////////////////////////////////////////// +template +IOStream& operator<< (IOStream& os, const std::map &m) { + const uint32_t sz = static_cast(m.size()); + os << sz; 
+ for (const auto& it : m) os << it.first << it.second; + return os; +} +template +IIStream& operator>> (IIStream& is, std::map &m) { + m.clear(); + uint32_t sz = 0u; + is >> sz; + for (std::size_t i = 0; i < sz; ++i) { + K k{}; + V v{}; + is >> k >> v; + m[k] = v; + } + return is; +} +template +IOStream& operator<< (IOStream& os, const std::unordered_map &m) { + const uint32_t sz = static_cast(m.size()); + os << sz; + for (auto &&it : m) os << it.first << it.second; + return os; +} +template +IIStream& operator>> (IIStream& is, std::unordered_map &m) { + m.clear(); + uint32_t sz = 0u; + is >> sz; + for (std::size_t i = 0; i < sz; ++i) { + K k{}; + V v{}; + is >> k >> v; + m[k] = v; + } + return is; +} +template +IOStream& operator<< (IOStream& os, const std::vector &ts) { + const uint32_t sz = static_cast(ts.size()); + os << sz; + for (auto &&v : ts) os << v; + return os; +} +template +IIStream& operator>> (IIStream& is, std::vector &ts) { + uint32_t sz = 0u; + is >> sz; + if (sz == 0u) { + ts.clear(); + } + else { + ts.resize(sz); + for (std::size_t i = 0; i < sz; ++i) is >> ts[i]; + } + return is; +} + +// Generic: variant serialization +namespace detail { +template +IOStream& put_v(IOStream&, const V&, std::size_t) { + GAPI_Assert(false && "variant>>: requested index is invalid"); +}; +template +IOStream& put_v(IOStream& os, const V& v, std::size_t x) { + return (x == 0u) + ? 
os << cv::util::get(v) + : put_v(os, v, x-1); +} +template +IIStream& get_v(IIStream&, V&, std::size_t, std::size_t) { + GAPI_Assert(false && "variant<<: requested index is invalid"); +} +template +IIStream& get_v(IIStream& is, V& v, std::size_t i, std::size_t gi) { + if (i == gi) { + X x{}; + is >> x; + v = V{std::move(x)}; + return is; + } else return get_v(is, v, i+1, gi); +} +} // namespace detail + +template +IOStream& operator<< (IOStream& os, const cv::util::variant &v) { + os << static_cast(v.index()); + return detail::put_v, Ts...>(os, v, v.index()); +} +template +IIStream& operator>> (IIStream& is, cv::util::variant &v) { + int idx = -1; + is >> idx; + GAPI_Assert(idx >= 0 && idx < (int)sizeof...(Ts)); + return detail::get_v, Ts...>(is, v, 0u, idx); +} + +// FIXME: consider a better solution +template +void getRunArgByIdx (IIStream& is, cv::util::variant &v, uint32_t idx) { + is = detail::get_v, Ts...>(is, v, 0u, idx); +} +} // namespace s11n + +namespace detail +{ +template struct try_deserialize_comparg; + +template<> struct try_deserialize_comparg> { +static cv::util::optional exec(const std::string&, cv::gapi::s11n::IIStream&) { + return { }; + } +}; + +template +struct try_deserialize_comparg> { +static cv::util::optional exec(const std::string& tag, cv::gapi::s11n::IIStream& is) { + if (tag == cv::detail::CompileArgTag::tag()) { + static_assert(cv::gapi::s11n::detail::has_S11N_spec::value, + "cv::gapi::deserialize expects Types to have S11N " + "specializations with deserialization callbacks!"); + return cv::util::optional( + GCompileArg { cv::gapi::s11n::detail::S11N::deserialize(is) }); + } + return try_deserialize_comparg>::exec(tag, is); +} +}; + +template struct deserialize_runarg; + +template +struct deserialize_runarg { +static GRunArg exec(cv::gapi::s11n::IIStream& is, uint32_t idx) { + if (idx == GRunArg::index_of()) { + auto ptr = std::make_shared(); + ptr->deserialize(is); + return GRunArg { RMat(std::move(ptr)) }; + } else { // non-RMat 
arg - use default deserialization + GRunArg arg; + getRunArgByIdx(is, arg, idx); + return arg; + } +} +}; + +template +inline cv::util::optional tryDeserializeCompArg(const std::string& tag, + const std::vector& sArg) { + std::unique_ptr pArgIs = cv::gapi::s11n::detail::getInStream(sArg); + return try_deserialize_comparg>::exec(tag, *pArgIs); +} + +template +cv::GCompileArgs getCompileArgs(const std::vector &sArgs) { + cv::GCompileArgs args; + + std::unique_ptr pIs = cv::gapi::s11n::detail::getInStream(sArgs); + cv::gapi::s11n::IIStream& is = *pIs; + + uint32_t sz = 0; + is >> sz; + for (uint32_t i = 0; i < sz; ++i) { + std::string tag; + is >> tag; + + std::vector sArg; + is >> sArg; + + cv::util::optional dArg = + cv::gapi::detail::tryDeserializeCompArg(tag, sArg); + + if (dArg.has_value()) + { + args.push_back(dArg.value()); + } + } + + return args; +} + +template +cv::GRunArgs getRunArgsWithRMats(const std::vector &p) { + std::unique_ptr pIs = cv::gapi::s11n::detail::getInStream(p); + cv::gapi::s11n::IIStream& is = *pIs; + cv::GRunArgs args; + + uint32_t sz = 0; + is >> sz; + for (uint32_t i = 0; i < sz; ++i) { + uint32_t idx = 0; + is >> idx; + args.push_back(cv::gapi::detail::deserialize_runarg::exec(is, idx)); + } + + return args; +} +} // namespace detail } // namespace gapi } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/s11n/base.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/s11n/base.hpp new file mode 100644 index 00000000000..d9335ee9f7c --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/s11n/base.hpp @@ -0,0 +1,46 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_S11N_BASE_HPP +#define OPENCV_GAPI_S11N_BASE_HPP + +#include +#include + +namespace cv { +namespace gapi { +namespace s11n { +struct IOStream; +struct IIStream; + +namespace detail { + +struct NotImplemented { +}; + +// The default S11N for custom types is NotImplemented +// Don't! sublass from NotImplemented if you actually implement S11N. +template +struct S11N: public NotImplemented { + static void serialize(IOStream &, const T &) { + GAPI_Assert(false && "No serialization routine is provided!"); + } + static T deserialize(IIStream &) { + GAPI_Assert(false && "No deserialization routine is provided!"); + } +}; + +template struct has_S11N_spec { + static constexpr bool value = !std::is_base_of::type>>::value; +}; + +} // namespace detail +} // namespace s11n +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_S11N_BASE_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/cap.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/cap.hpp index faa555063af..aad6af618c9 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/cap.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/cap.hpp @@ -21,9 +21,11 @@ * Note for developers: please don't put videoio dependency in G-API * because of this file. 
*/ +#include #include #include +#include namespace cv { namespace gapi { @@ -55,6 +57,7 @@ protected: cv::VideoCapture cap; cv::Mat first; bool first_pulled = false; + int64_t counter = 0; void prep() { @@ -80,19 +83,26 @@ protected: GAPI_Assert(!first.empty()); first_pulled = true; data = first; // no need to clone here since it was cloned already - return true; } - - if (!cap.isOpened()) return false; - - cv::Mat frame; - if (!cap.read(frame)) + else { - // end-of-stream happened - return false; + if (!cap.isOpened()) return false; + + cv::Mat frame; + if (!cap.read(frame)) + { + // end-of-stream happened + return false; + } + // Same reason to clone as in prep() + data = frame.clone(); } - // Same reason to clone as in prep() - data = frame.clone(); + // Tag data with seq_id/ts + const auto now = std::chrono::system_clock::now(); + const auto dur = std::chrono::duration_cast + (now.time_since_epoch()); + data.meta[cv::gapi::streaming::meta_tag::timestamp] = int64_t{dur.count()}; + data.meta[cv::gapi::streaming::meta_tag::seq_id] = int64_t{counter++}; return true; } @@ -103,6 +113,12 @@ protected: } }; +// NB: Overload for using from python +GAPI_EXPORTS_W cv::Ptr inline make_capture_src(const std::string& path) +{ + return make_src(path); +} + } // namespace wip } // namespace gapi } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/desync.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/desync.hpp new file mode 100644 index 00000000000..86de279fe94 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/desync.hpp @@ -0,0 +1,84 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + + +#ifndef OPENCV_GAPI_GSTREAMING_DESYNC_HPP +#define OPENCV_GAPI_GSTREAMING_DESYNC_HPP + +#include + +#include +#include +#include +#include +#include + +namespace cv { +namespace gapi { +namespace streaming { + +namespace detail { +struct GDesync { + static const char *id() { + return "org.opencv.streaming.desync"; + } + + // An universal yield for desync. + // Yields output objects according to the input Types... + // Reuses gkernel machinery. + // FIXME: This function can be generic and declared in gkernel.hpp + // (it is there already, but a part of GKernelType[M] + template + static std::tuple yield(cv::GCall &call, cv::detail::Seq) { + return std::make_tuple(cv::detail::Yield::yield(call, IIs)...); + } +}; + +template +G desync(const G &g) { + cv::GKernel k{ + GDesync::id() // kernel id + , "" // kernel tag + , [](const GMetaArgs &a, const GArgs &) {return a;} // outMeta callback + , {cv::detail::GTypeTraits::shape} // output Shape + , {cv::detail::GTypeTraits::op_kind} // input data kinds + , {cv::detail::GObtainCtor::get()} // output template ctors + }; + cv::GCall call(std::move(k)); + call.pass(g); + return std::get<0>(GDesync::yield(call, cv::detail::MkSeq<1>::type())); +} +} // namespace detail + +/** + * @brief Starts a desynchronized branch in the graph. + * + * This operation takes a single G-API data object and returns a + * graph-level "duplicate" of this object. + * + * Operations which use this data object can be desynchronized + * from the rest of the graph. + * + * This operation has no effect when a GComputation is compiled with + * regular cv::GComputation::compile(), since cv::GCompiled objects + * always produce their full output vectors. + * + * This operation only makes sense when a GComputation is compiled in + * straming mode with cv::GComputation::compileStreaming(). 
If this + * operation is used and there are desynchronized outputs, the user + * should use a special version of cv::GStreamingCompiled::pull() + * which produces an array of cv::util::optional<> objects. + * + * @note This feature is highly experimental now and is currently + * limited to a single GMat argument only. + */ +GAPI_EXPORTS GMat desync(const GMat &g); + +} // namespace streaming +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_GSTREAMING_DESYNC_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/format.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/format.hpp new file mode 100644 index 00000000000..8bec5dc8137 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/format.hpp @@ -0,0 +1,52 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_GSTREAMING_FORMAT_HPP +#define OPENCV_GAPI_GSTREAMING_FORMAT_HPP + +#include // GKernelPackage + +namespace cv { +namespace gapi { +namespace streaming { + +cv::gapi::GKernelPackage kernels(); +cv::gapi::GBackend backend(); + +// FIXME: Make a generic kernel +G_API_OP(GCopy, , "org.opencv.streaming.copy") +{ + static GFrameDesc outMeta(const GFrameDesc& in) { return in; } +}; + +G_API_OP(GBGR, , "org.opencv.streaming.BGR") +{ + static GMatDesc outMeta(const GFrameDesc& in) { return GMatDesc{CV_8U, 3, in.size}; } +}; + +/** @brief Gets copy from the input frame + +@note Function textual ID is "org.opencv.streaming.copy" + +@param in Input frame +@return Copy of the input frame +*/ +GAPI_EXPORTS cv::GFrame copy(const cv::GFrame& in); + +/** @brief Gets bgr plane from input frame + +@note Function textual ID is "org.opencv.streaming.BGR" + +@param in Input frame +@return Image in BGR format +*/ +GAPI_EXPORTS cv::GMat BGR (const cv::GFrame& in); + +} // namespace streaming +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_GSTREAMING_FORMAT_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/meta.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/meta.hpp new file mode 100644 index 00000000000..cbcfc3aa37c --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/streaming/meta.hpp @@ -0,0 +1,79 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + + +#ifndef OPENCV_GAPI_GSTREAMING_META_HPP +#define OPENCV_GAPI_GSTREAMING_META_HPP + +#include +#include +#include +#include + +namespace cv { +namespace gapi { +namespace streaming { + +// FIXME: the name is debatable +namespace meta_tag { +static constexpr const char * timestamp = "org.opencv.gapi.meta.timestamp"; +static constexpr const char * seq_id = "org.opencv.gapi.meta.seq_id"; +} // namespace meta_tag + +namespace detail { +struct GMeta { + static const char *id() { + return "org.opencv.streaming.meta"; + } + // A universal yield for meta(), same as in GDesync + template + static std::tuple yield(cv::GCall &call, cv::detail::Seq) { + return std::make_tuple(cv::detail::Yield::yield(call, IIs)...); + } + // Also a universal outMeta stub here + static GMetaArgs getOutMeta(const GMetaArgs &args, const GArgs &) { + return args; + } +}; +} // namespace detail + +template +cv::GOpaque meta(G g, const std::string &tag) { + using O = cv::GOpaque; + cv::GKernel k{ + detail::GMeta::id() // kernel id + , tag // kernel tag. 
Use meta tag here + , &detail::GMeta::getOutMeta // outMeta callback + , {cv::detail::GTypeTraits::shape} // output Shape + , {cv::detail::GTypeTraits::op_kind} // input data kinds + , {cv::detail::GObtainCtor::get()} // output template ctors + }; + cv::GCall call(std::move(k)); + call.pass(g); + return std::get<0>(detail::GMeta::yield(call, cv::detail::MkSeq<1>::type())); +} + +template +cv::GOpaque timestamp(G g) { + return meta(g, meta_tag::timestamp); +} + +template +cv::GOpaque seq_id(G g) { + return meta(g, meta_tag::seq_id); +} + +template +cv::GOpaque seqNo(G g) { + // Old name, compatibility only + return seq_id(g); +} + +} // namespace streaming +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_GSTREAMING_META_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/util/copy_through_move.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/util/copy_through_move.hpp new file mode 100644 index 00000000000..1a1121eb218 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/util/copy_through_move.hpp @@ -0,0 +1,34 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_UTIL_COPY_THROUGH_MOVE_HPP +#define OPENCV_GAPI_UTIL_COPY_THROUGH_MOVE_HPP + +#include //decay_t + +namespace cv +{ +namespace util +{ + //This is a tool to move initialize captures of a lambda in C++11 + template + struct copy_through_move_t{ + T value; + const T& get() const {return value;} + T& get() {return value;} + copy_through_move_t(T&& g) : value(std::move(g)) {} + copy_through_move_t(copy_through_move_t&&) = default; + copy_through_move_t(copy_through_move_t const& lhs) : copy_through_move_t(std::move(const_cast(lhs))) {} + }; + + template + copy_through_move_t> copy_through_move(T&& t){ + return std::forward(t); + } +} // namespace util +} // namespace cv + +#endif /* OPENCV_GAPI_UTIL_COPY_THROUGH_MOVE_HPP */ diff --git a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/util/optional.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/util/optional.hpp index 1aa2b265d9a..26d7b64a02e 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/util/optional.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/util/optional.hpp @@ -35,9 +35,9 @@ namespace util // instead {} optional() {}; optional(const optional&) = default; - explicit optional(T &&value) noexcept; - explicit optional(const T &value) noexcept; - optional(optional &&) noexcept; + explicit optional(T&&) noexcept; + explicit optional(const T&) noexcept; + optional(optional&&) noexcept; // TODO: optional(nullopt_t) noexcept; // TODO: optional(const optional &) // TODO: optional(optional &&) @@ -46,8 +46,8 @@ namespace util // TODO: optional(U&& value); // Assignment - optional& operator=(const optional& rhs) = default; - optional& operator=(optional&& rhs); + optional& operator=(const optional&) = default; + optional& operator=(optional&&); // Observers T* operator-> (); diff --git 
a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/video.hpp b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/video.hpp index 7f90134e6d5..10965b0aa65 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/video.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/include/opencv2/gapi/video.hpp @@ -16,6 +16,32 @@ */ namespace cv { namespace gapi { + +/** @brief Structure for the Kalman filter's initialization parameters.*/ + +struct GAPI_EXPORTS KalmanParams +{ + // initial state + + //! corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) + Mat state; + //! posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k) + Mat errorCov; + + // dynamic system description + + //! state transition matrix (A) + Mat transitionMatrix; + //! measurement matrix (H) + Mat measurementMatrix; + //! process noise covariance matrix (Q) + Mat processNoiseCov; + //! measurement noise covariance matrix (R) + Mat measurementNoiseCov; + //! control matrix (B) (Optional: not used if there's no control) + Mat controlMatrix; +}; + namespace video { using GBuildPyrOutput = std::tuple, GScalar>; @@ -62,6 +88,95 @@ G_TYPED_KERNEL(GCalcOptFlowLKForPyr, return std::make_tuple(empty_array_desc(), empty_array_desc(), empty_array_desc()); } }; + +enum BackgroundSubtractorType +{ + TYPE_BS_MOG2, + TYPE_BS_KNN +}; + +/** @brief Structure for the Background Subtractor operation's initialization parameters.*/ + +struct BackgroundSubtractorParams +{ + //! Type of the Background Subtractor operation. + BackgroundSubtractorType operation = TYPE_BS_MOG2; + + //! Length of the history. + int history = 500; + + //! For MOG2: Threshold on the squared Mahalanobis distance between the pixel + //! and the model to decide whether a pixel is well described by + //! the background model. + //! For KNN: Threshold on the squared distance between the pixel and the sample + //! to decide whether a pixel is close to that sample. 
+ double threshold = 16; + + //! If true, the algorithm will detect shadows and mark them. + bool detectShadows = true; + + //! The value between 0 and 1 that indicates how fast + //! the background model is learnt. + //! Negative parameter value makes the algorithm use some automatically + //! chosen learning rate. + double learningRate = -1; + + //! default constructor + BackgroundSubtractorParams() {} + + /** Full constructor + @param op MOG2/KNN Background Subtractor type. + @param histLength Length of the history. + @param thrshld For MOG2: Threshold on the squared Mahalanobis distance between + the pixel and the model to decide whether a pixel is well described by the background model. + For KNN: Threshold on the squared distance between the pixel and the sample to decide + whether a pixel is close to that sample. + @param detect If true, the algorithm will detect shadows and mark them. It decreases the + speed a bit, so if you do not need this feature, set the parameter to false. + @param lRate The value between 0 and 1 that indicates how fast the background model is learnt. + Negative parameter value makes the algorithm to use some automatically chosen learning rate. 
+ */ + BackgroundSubtractorParams(BackgroundSubtractorType op, int histLength, + double thrshld, bool detect, double lRate) : operation(op), + history(histLength), + threshold(thrshld), + detectShadows(detect), + learningRate(lRate){} +}; + +G_TYPED_KERNEL(GBackgroundSubtractor, , + "org.opencv.video.BackgroundSubtractor") +{ + static GMatDesc outMeta(const GMatDesc& in, const BackgroundSubtractorParams& bsParams) + { + GAPI_Assert(bsParams.history >= 0); + GAPI_Assert(bsParams.learningRate <= 1); + return in.withType(CV_8U, 1); + } +}; + +void checkParams(const cv::gapi::KalmanParams& kfParams, + const cv::GMatDesc& measurement, const cv::GMatDesc& control = {}); + +G_TYPED_KERNEL(GKalmanFilter, , GMat, KalmanParams)>, + "org.opencv.video.KalmanFilter") +{ + static GMatDesc outMeta(const GMatDesc& measurement, const GOpaqueDesc&, + const GMatDesc& control, const KalmanParams& kfParams) + { + checkParams(kfParams, measurement, control); + return measurement.withSize(Size(1, kfParams.transitionMatrix.rows)); + } +}; + +G_TYPED_KERNEL(GKalmanFilterNoControl, , KalmanParams)>, "org.opencv.video.KalmanFilterNoControl") +{ + static GMatDesc outMeta(const GMatDesc& measurement, const GOpaqueDesc&, const KalmanParams& kfParams) + { + checkParams(kfParams, measurement); + return measurement.withSize(Size(1, kfParams.transitionMatrix.rows)); + } +}; } //namespace video //! @addtogroup gapi_video @@ -83,8 +198,9 @@ G_TYPED_KERNEL(GCalcOptFlowLKForPyr, @param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false to force data copying. -@return output pyramid. -@return number of levels in constructed pyramid. Can be less than maxLevel. +@return + - output pyramid. + - number of levels in constructed pyramid. Can be less than maxLevel. 
*/ GAPI_EXPORTS std::tuple, GScalar> buildOpticalFlowPyramid(const GMat &img, @@ -131,11 +247,12 @@ by number of pixels in a window; if this value is less than minEigThreshold, the feature is filtered out and its flow is not processed, so it allows to remove bad points and get a performance boost. -@return GArray of 2D points (with single-precision floating-point coordinates) +@return + - GArray of 2D points (with single-precision floating-point coordinates) containing the calculated new positions of input features in the second image. -@return status GArray (of unsigned chars); each element of the vector is set to 1 if + - status GArray (of unsigned chars); each element of the vector is set to 1 if the flow for the corresponding features has been found, otherwise, it is set to 0. -@return GArray of errors (doubles); each element of the vector is set to an error for the + - GArray of errors (doubles); each element of the vector is set to an error for the corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't found then the error is not defined (use the status parameter to find such cases). */ @@ -169,8 +286,75 @@ calcOpticalFlowPyrLK(const GArray &prevPyr, int flags = 0, double minEigThresh = 1e-4); +/** @brief Gaussian Mixture-based or K-nearest neighbours-based Background/Foreground Segmentation Algorithm. +The operation generates a foreground mask. + +@return Output image is foreground mask, i.e. 8-bit unsigned 1-channel (binary) matrix @ref CV_8UC1. + +@note Functional textual ID is "org.opencv.video.BackgroundSubtractor" + +@param src input image: Floating point frame is used without scaling and should be in range [0,255]. +@param bsParams Set of initialization parameters for Background Subtractor kernel. +*/ +GAPI_EXPORTS GMat BackgroundSubtractor(const GMat& src, const cv::gapi::video::BackgroundSubtractorParams& bsParams); + +/** @brief Standard Kalman filter algorithm . 
+ +@note Functional textual ID is "org.opencv.video.KalmanFilter" + +@param measurement input matrix: 32-bit or 64-bit float 1-channel matrix containing measurements. +@param haveMeasurement dynamic input flag that indicates whether we get measurements +at a particular iteration . +@param control input matrix: 32-bit or 64-bit float 1-channel matrix contains control data +for changing dynamic system. +@param kfParams Set of initialization parameters for Kalman filter kernel. + +@return Output matrix is predicted or corrected state. They can be 32-bit or 64-bit float +1-channel matrix @ref CV_32FC1 or @ref CV_64FC1. + +@details If measurement matrix is given (haveMeasurements == true), corrected state will +be returned which corresponds to the pipeline +cv::KalmanFilter::predict(control) -> cv::KalmanFilter::correct(measurement). +Otherwise, predicted state will be returned which corresponds to the call of +cv::KalmanFilter::predict(control). +@sa cv::KalmanFilter +*/ +GAPI_EXPORTS GMat KalmanFilter(const GMat& measurement, const GOpaque& haveMeasurement, + const GMat& control, const cv::gapi::KalmanParams& kfParams); + +/** @overload +The case of Standard Kalman filter algorithm when there is no control in a dynamic system. +In this case the controlMatrix is empty and control vector is absent. + +@note Function textual ID is "org.opencv.video.KalmanFilterNoControl" + +@param measurement input matrix: 32-bit or 64-bit float 1-channel matrix containing measurements. +@param haveMeasurement dynamic input flag that indicates whether we get measurements +at a particular iteration. +@param kfParams Set of initialization parameters for Kalman filter kernel. + +@return Output matrix is predicted or corrected state. They can be 32-bit or 64-bit float +1-channel matrix @ref CV_32FC1 or @ref CV_64FC1. + +@sa cv::KalmanFilter + */ +GAPI_EXPORTS GMat KalmanFilter(const GMat& measurement, const GOpaque& haveMeasurement, + const cv::gapi::KalmanParams& kfParams); + //! 
@} gapi_video } //namespace gapi } //namespace cv + +namespace cv { namespace detail { +template<> struct CompileArgTag +{ + static const char* tag() + { + return "org.opencv.video.background_substractor_params"; + } +}; +} // namespace detail +} // namespace cv + #endif // OPENCV_GAPI_VIDEO_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/pyopencv_gapi.hpp b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/pyopencv_gapi.hpp index 7ef4cac57fc..e25328e64f1 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/pyopencv_gapi.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/pyopencv_gapi.hpp @@ -1,4 +1,18 @@ +#ifndef OPENCV_GAPI_PYOPENCV_GAPI_HPP +#define OPENCV_GAPI_PYOPENCV_GAPI_HPP + +#ifdef HAVE_OPENCV_GAPI + +// NB: Python wrapper replaces :: with _ for classes using gapi_GKernelPackage = cv::gapi::GKernelPackage; +using gapi_GNetPackage = cv::gapi::GNetPackage; +using gapi_ie_PyParams = cv::gapi::ie::PyParams; +using gapi_wip_IStreamSource_Ptr = cv::Ptr; + +// FIXME: Python wrapper generate code without namespace std, +// so it cause error: "string wasn't declared" +// WA: Create using +using std::string; template<> bool pyopencv_to(PyObject* obj, std::vector& value, const ArgInfo& info) @@ -12,6 +26,90 @@ PyObject* pyopencv_from(const std::vector& value) return pyopencv_from_generic_vec(value); } +template<> +bool pyopencv_to(PyObject* obj, GRunArgs& value, const ArgInfo& info) +{ + return pyopencv_to_generic_vec(obj, value, info); +} + +static PyObject* from_grunarg(const GRunArg& v) +{ + switch (v.index()) + { + case GRunArg::index_of(): + { + const auto& m = util::get(v); + return pyopencv_from(m); + } + + case GRunArg::index_of(): + { + const auto& s = util::get(v); + return pyopencv_from(s); + } + case GRunArg::index_of(): + { + const auto& vref = util::get(v); + switch (vref.getKind()) + { + case cv::detail::OpaqueKind::CV_POINT2F: + return pyopencv_from(vref.rref()); + default: + 
PyErr_SetString(PyExc_TypeError, "Unsupported kind for GArray"); + return NULL; + } + } + default: + PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs"); + return NULL; + } + GAPI_Assert(false); +} + +template<> +PyObject* pyopencv_from(const GRunArgs& value) +{ + size_t i, n = value.size(); + + // NB: It doesn't make sense to return list with a single element + if (n == 1) + { + PyObject* item = from_grunarg(value[0]); + if(!item) + { + return NULL; + } + return item; + } + + PyObject* list = PyList_New(n); + for(i = 0; i < n; ++i) + { + PyObject* item = from_grunarg(value[i]); + if(!item) + { + Py_DECREF(list); + PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs"); + return NULL; + } + PyList_SetItem(list, i, item); + } + + return list; +} + +template<> +bool pyopencv_to(PyObject* obj, GMetaArgs& value, const ArgInfo& info) +{ + return pyopencv_to_generic_vec(obj, value, info); +} + +template<> +PyObject* pyopencv_from(const GMetaArgs& value) +{ + return pyopencv_from_generic_vec(value); +} + template static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw) { @@ -19,14 +117,24 @@ static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw) GProtoArgs args; Py_ssize_t size = PyTuple_Size(py_args); - for (int i = 0; i < size; ++i) { + for (int i = 0; i < size; ++i) + { PyObject* item = PyTuple_GetItem(py_args, i); - if (PyObject_TypeCheck(item, reinterpret_cast(pyopencv_GScalar_TypePtr))) { + if (PyObject_TypeCheck(item, reinterpret_cast(pyopencv_GScalar_TypePtr))) + { args.emplace_back(reinterpret_cast(item)->v); - } else if (PyObject_TypeCheck(item, reinterpret_cast(pyopencv_GMat_TypePtr))) { + } + else if (PyObject_TypeCheck(item, reinterpret_cast(pyopencv_GMat_TypePtr))) + { args.emplace_back(reinterpret_cast(item)->v); - } else { - PyErr_SetString(PyExc_TypeError, "cv.GIn() supports only cv.GMat and cv.GScalar"); + } + else if (PyObject_TypeCheck(item, reinterpret_cast(pyopencv_GArrayP2f_TypePtr))) + { + 
args.emplace_back(reinterpret_cast(item)->v.strip()); + } + else + { + PyErr_SetString(PyExc_TypeError, "Unsupported type for cv.GIn()/cv.GOut()"); return NULL; } } @@ -43,3 +151,64 @@ static PyObject* pyopencv_cv_GOut(PyObject* , PyObject* py_args, PyObject* kw) { return extract_proto_args(py_args, kw); } + +static PyObject* pyopencv_cv_gin(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + GRunArgs args; + Py_ssize_t size = PyTuple_Size(py_args); + for (int i = 0; i < size; ++i) + { + PyObject* item = PyTuple_GetItem(py_args, i); + if (PyTuple_Check(item)) + { + cv::Scalar s; + if (pyopencv_to(item, s, ArgInfo("scalar", false))) + { + args.emplace_back(s); + } + else + { + PyErr_SetString(PyExc_TypeError, "Failed convert tuple to cv::Scalar"); + return NULL; + } + } + else if (PyArray_Check(item)) + { + cv::Mat m; + if (pyopencv_to(item, m, ArgInfo("mat", false))) + { + args.emplace_back(m); + } + else + { + PyErr_SetString(PyExc_TypeError, "Failed convert array to cv::Mat"); + return NULL; + } + } + else if (PyObject_TypeCheck(item, + reinterpret_cast(pyopencv_gapi_wip_IStreamSource_TypePtr))) + { + cv::gapi::wip::IStreamSource::Ptr source = + reinterpret_cast(item)->v; + args.emplace_back(source); + } + else + { + PyErr_SetString(PyExc_TypeError, "cv.gin can works only with cv::Mat," + "cv::Scalar, cv::gapi::wip::IStreamSource::Ptr"); + return NULL; + } + } + + return pyopencv_from_generic_vec(args); +} + +static PyObject* pyopencv_cv_gout(PyObject* o, PyObject* py_args, PyObject* kw) +{ + return pyopencv_cv_gin(o, py_args, kw); +} + +#endif // HAVE_OPENCV_GAPI +#endif // OPENCV_GAPI_PYOPENCV_GAPI_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/shadow_gapi.hpp b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/shadow_gapi.hpp index 2150b86cff4..792314512c5 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/shadow_gapi.hpp +++ 
b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/shadow_gapi.hpp @@ -3,11 +3,30 @@ namespace cv { + struct GAPI_EXPORTS_W_SIMPLE GCompileArg { }; + GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg); + GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GNetPackage pkg); + + // NB: This classes doesn't exist in *.so + // HACK: Mark them as a class to force python wrapper generate code for this entities class GAPI_EXPORTS_W_SIMPLE GProtoArg { }; class GAPI_EXPORTS_W_SIMPLE GProtoInputArgs { }; class GAPI_EXPORTS_W_SIMPLE GProtoOutputArgs { }; + class GAPI_EXPORTS_W_SIMPLE GRunArg { }; + class GAPI_EXPORTS_W_SIMPLE GMetaArg { }; + + class GAPI_EXPORTS_W_SIMPLE GArrayP2f { }; using GProtoInputArgs = GIOProtoArgs; using GProtoOutputArgs = GIOProtoArgs; + + namespace gapi + { + GAPI_EXPORTS_W gapi::GNetPackage networks(const cv::gapi::ie::PyParams& params); + namespace wip + { + class GAPI_EXPORTS_W IStreamSource { }; + } // namespace wip + } // namespace gapi } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_core.py b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_core.py index 7720dbc3987..267037a78db 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_core.py +++ b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_core.py @@ -2,29 +2,30 @@ import numpy as np import cv2 as cv +import os from tests_common import NewOpenCVTests # Plaidml is an optional backend pkgs = [ - cv.gapi.core.ocl.kernels(), - cv.gapi.core.cpu.kernels(), - cv.gapi.core.fluid.kernels() - # cv.gapi.core.plaidml.kernels() - ] + ('ocl' , cv.gapi.core.ocl.kernels()), + ('cpu' , cv.gapi.core.cpu.kernels()), + ('fluid' , cv.gapi.core.fluid.kernels()) + # ('plaidml', cv.gapi.core.plaidml.kernels()) + ] class gapi_core_test(NewOpenCVTests): def test_add(self): # TODO: Extend to use any type and size here - sz = (1280, 720) - in1 = 
np.random.randint(0, 100, sz).astype(np.uint8) - in2 = np.random.randint(0, 100, sz).astype(np.uint8) + sz = (720, 1280) + in1 = np.full(sz, 100) + in2 = np.full(sz, 50) # OpenCV - expected = in1 + in2 + expected = cv.add(in1, in2) # G-API g_in1 = cv.GMat() @@ -32,15 +33,39 @@ class gapi_core_test(NewOpenCVTests): g_out = cv.gapi.add(g_in1, g_in2) comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out)) - for pkg in pkgs: - actual = comp.apply(in1, in2, args=cv.compile_args(pkg)) + for pkg_name, pkg in pkgs: + actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg)) # Comparison - self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF)) + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') + self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend') + + + def test_add_uint8(self): + sz = (720, 1280) + in1 = np.full(sz, 100, dtype=np.uint8) + in2 = np.full(sz, 50 , dtype=np.uint8) + + # OpenCV + expected = cv.add(in1, in2) + + # G-API + g_in1 = cv.GMat() + g_in2 = cv.GMat() + g_out = cv.gapi.add(g_in1, g_in2) + comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out)) + + for pkg_name, pkg in pkgs: + actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg)) + # Comparison + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') + self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend') def test_mean(self): - sz = (1280, 720, 3) - in_mat = np.random.randint(0, 100, sz).astype(np.uint8) + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + in_mat = cv.imread(img_path) # OpenCV expected = cv.mean(in_mat) @@ -50,10 +75,57 @@ class gapi_core_test(NewOpenCVTests): g_out = cv.gapi.mean(g_in) comp = cv.GComputation(g_in, g_out) - for pkg in pkgs: - actual = comp.apply(in_mat, args=cv.compile_args(pkg)) + for pkg_name, pkg in pkgs: + actual = 
comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg)) # Comparison - self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF)) + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') + + + def test_split3(self): + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + in_mat = cv.imread(img_path) + + # OpenCV + expected = cv.split(in_mat) + + # G-API + g_in = cv.GMat() + b, g, r = cv.gapi.split3(g_in) + comp = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r)) + + for pkg_name, pkg in pkgs: + actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg)) + # Comparison + for e, a in zip(expected, actual): + self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') + self.assertEqual(e.dtype, a.dtype, 'Failed on ' + pkg_name + ' backend') + + + def test_threshold(self): + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY) + maxv = (30, 30) + + # OpenCV + expected_thresh, expected_mat = cv.threshold(in_mat, maxv[0], maxv[0], cv.THRESH_TRIANGLE) + + # G-API + g_in = cv.GMat() + g_sc = cv.GScalar() + mat, threshold = cv.gapi.threshold(g_in, g_sc, cv.THRESH_TRIANGLE) + comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(mat, threshold)) + + for pkg_name, pkg in pkgs: + actual_mat, actual_thresh = comp.apply(cv.gin(in_mat, maxv), args=cv.compile_args(pkg)) + # Comparison + self.assertEqual(0.0, cv.norm(expected_mat, actual_mat, cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') + self.assertEqual(expected_mat.dtype, actual_mat.dtype, + 'Failed on ' + pkg_name + ' backend') + self.assertEqual(expected_thresh, actual_thresh[0], + 'Failed on ' + pkg_name + ' backend') if __name__ == '__main__': diff --git a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_imgproc.py 
b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_imgproc.py new file mode 100644 index 00000000000..dd1e3970819 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_imgproc.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +import numpy as np +import cv2 as cv +import os + +from tests_common import NewOpenCVTests + + +# Plaidml is an optional backend +pkgs = [ + ('ocl' , cv.gapi.core.ocl.kernels()), + ('cpu' , cv.gapi.core.cpu.kernels()), + ('fluid' , cv.gapi.core.fluid.kernels()) + # ('plaidml', cv.gapi.core.plaidml.kernels()) + ] + + +class gapi_imgproc_test(NewOpenCVTests): + + def test_good_features_to_track(self): + # TODO: Extend to use any type and size here + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + in1 = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY) + + # NB: goodFeaturesToTrack configuration + max_corners = 50 + quality_lvl = 0.01 + min_distance = 10 + block_sz = 3 + use_harris_detector = True + k = 0.04 + mask = None + + # OpenCV + expected = cv.goodFeaturesToTrack(in1, max_corners, quality_lvl, + min_distance, mask=mask, + blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k) + + # G-API + g_in = cv.GMat() + g_out = cv.gapi.goodFeaturesToTrack(g_in, max_corners, quality_lvl, + min_distance, mask, block_sz, use_harris_detector, k) + + comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out)) + + for pkg_name, pkg in pkgs: + actual = comp.apply(cv.gin(in1), args=cv.compile_args(pkg)) + # NB: OpenCV & G-API have different output shapes: + # OpenCV - (num_points, 1, 2) + # G-API - (num_points, 2) + # Comparison + self.assertEqual(0.0, cv.norm(expected.flatten(), actual.flatten(), cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') + + + def test_rgb2gray(self): + # TODO: Extend to use any type and size here + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + in1 = cv.imread(img_path) + + # 
OpenCV + expected = cv.cvtColor(in1, cv.COLOR_RGB2GRAY) + + # G-API + g_in = cv.GMat() + g_out = cv.gapi.RGB2Gray(g_in) + + comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out)) + + for pkg_name, pkg in pkgs: + actual = comp.apply(cv.gin(in1), args=cv.compile_args(pkg)) + # Comparison + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_infer.py b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_infer.py new file mode 100644 index 00000000000..a6fabf72534 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_infer.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +import numpy as np +import cv2 as cv +import os + +from tests_common import NewOpenCVTests + + +class test_gapi_infer(NewOpenCVTests): + + def test_getAvailableTargets(self): + targets = cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_OPENCV) + self.assertTrue(cv.dnn.DNN_TARGET_CPU in targets) + + + def test_age_gender_infer(self): + + # NB: Check IE + if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE): + return + + root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013' + model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')]) + weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')]) + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + device_id = 'CPU' + img = cv.resize(cv.imread(img_path), (62,62)) + + # OpenCV DNN + net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path) + net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE) + net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU) + + blob = cv.dnn.blobFromImage(img) + + 
net.setInput(blob) + dnn_age, dnn_gender = net.forward(net.getUnconnectedOutLayersNames()) + + # OpenCV G-API + g_in = cv.GMat() + inputs = cv.GInferInputs() + inputs.setInput('data', g_in) + + outputs = cv.gapi.infer("net", inputs) + age_g = outputs.at("age_conv3") + gender_g = outputs.at("prob") + + comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g)) + pp = cv.gapi.ie.params("net", model_path, weights_path, device_id) + + nets = cv.gapi.networks(pp) + args = cv.compile_args(nets) + gapi_age, gapi_gender = comp.apply(cv.gin(img), args=cv.compile_args(cv.gapi.networks(pp))) + + # Check + self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF)) + self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF)) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py index 1f672f4bc2e..53304fcb261 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py +++ b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_sample_pipelines.py @@ -2,25 +2,26 @@ import numpy as np import cv2 as cv +import os from tests_common import NewOpenCVTests # Plaidml is an optional backend pkgs = [ - cv.gapi.core.ocl.kernels(), - cv.gapi.core.cpu.kernels(), - cv.gapi.core.fluid.kernels() - # cv.gapi.core.plaidml.kernels() - ] + ('ocl' , cv.gapi.core.ocl.kernels()), + ('cpu' , cv.gapi.core.cpu.kernels()), + ('fluid' , cv.gapi.core.fluid.kernels()) + # ('plaidml', cv.gapi.core.plaidml.kernels()) + ] class gapi_sample_pipelines(NewOpenCVTests): # NB: This test check multiple outputs for operation def test_mean_over_r(self): - sz = (100, 100, 3) - in_mat = np.random.randint(0, 100, sz).astype(np.uint8) + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + 
in_mat = cv.imread(img_path) # # OpenCV _, _, r_ch = cv.split(in_mat) @@ -32,10 +33,11 @@ class gapi_sample_pipelines(NewOpenCVTests): g_out = cv.gapi.mean(r) comp = cv.GComputation(g_in, g_out) - for pkg in pkgs: - actual = comp.apply(in_mat, args=cv.compile_args(pkg)) + for pkg_name, pkg in pkgs: + actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg)) # Comparison - self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF)) + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF), + 'Failed on ' + pkg_name + ' backend') if __name__ == '__main__': diff --git a/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_streaming.py b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_streaming.py new file mode 100644 index 00000000000..ae7ef5d338a --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_streaming.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python + +import numpy as np +import cv2 as cv +import os + +from tests_common import NewOpenCVTests + +class test_gapi_streaming(NewOpenCVTests): + + def test_image_input(self): + sz = (1280, 720) + in_mat = np.random.randint(0, 100, sz).astype(np.uint8) + + # OpenCV + expected = cv.medianBlur(in_mat, 3) + + # G-API + g_in = cv.GMat() + g_out = cv.gapi.medianBlur(g_in, 3) + c = cv.GComputation(g_in, g_out) + ccomp = c.compileStreaming(cv.descr_of(cv.gin(in_mat))) + ccomp.setSource(cv.gin(in_mat)) + ccomp.start() + + _, actual = ccomp.pull() + + # Assert + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF)) + + + def test_video_input(self): + ksize = 3 + path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']]) + + # OpenCV + cap = cv.VideoCapture(path) + + # G-API + g_in = cv.GMat() + g_out = cv.gapi.medianBlur(g_in, ksize) + c = cv.GComputation(g_in, g_out) + + ccomp = c.compileStreaming() + source = cv.gapi.wip.make_capture_src(path) + ccomp.setSource(source) + ccomp.start() + + # Assert + 
max_num_frames = 10 + proc_num_frames = 0 + while cap.isOpened(): + has_expected, expected = cap.read() + has_actual, actual = ccomp.pull() + + self.assertEqual(has_expected, has_actual) + + if not has_actual: + break + + self.assertEqual(0.0, cv.norm(cv.medianBlur(expected, ksize), actual, cv.NORM_INF)) + + proc_num_frames += 1 + if proc_num_frames == max_num_frames: + break; + + + def test_video_split3(self): + path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']]) + + # OpenCV + cap = cv.VideoCapture(path) + + # G-API + g_in = cv.GMat() + b, g, r = cv.gapi.split3(g_in) + c = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r)) + + ccomp = c.compileStreaming() + source = cv.gapi.wip.make_capture_src(path) + ccomp.setSource(source) + ccomp.start() + + # Assert + max_num_frames = 10 + proc_num_frames = 0 + while cap.isOpened(): + has_expected, frame = cap.read() + has_actual, actual = ccomp.pull() + + self.assertEqual(has_expected, has_actual) + + if not has_actual: + break + + expected = cv.split(frame) + for e, a in zip(expected, actual): + self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF)) + + proc_num_frames += 1 + if proc_num_frames == max_num_frames: + break; + + + def test_video_add(self): + sz = (576, 768, 3) + in_mat = np.random.randint(0, 100, sz).astype(np.uint8) + + path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']]) + + # OpenCV + cap = cv.VideoCapture(path) + + # G-API + g_in1 = cv.GMat() + g_in2 = cv.GMat() + out = cv.gapi.add(g_in1, g_in2) + c = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(out)) + + ccomp = c.compileStreaming() + source = cv.gapi.wip.make_capture_src(path) + ccomp.setSource(cv.gin(source, in_mat)) + ccomp.start() + + # Assert + max_num_frames = 10 + proc_num_frames = 0 + while cap.isOpened(): + has_expected, frame = cap.read() + has_actual, actual = ccomp.pull() + + self.assertEqual(has_expected, has_actual) + + if not has_actual: + break + + expected = cv.add(frame, in_mat) 
+ self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF)) + + proc_num_frames += 1 + if proc_num_frames == max_num_frames: + break; + + + def test_video_good_features_to_track(self): + path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']]) + + # NB: goodFeaturesToTrack configuration + max_corners = 50 + quality_lvl = 0.01 + min_distance = 10 + block_sz = 3 + use_harris_detector = True + k = 0.04 + mask = None + + # OpenCV + cap = cv.VideoCapture(path) + + # G-API + g_in = cv.GMat() + g_gray = cv.gapi.RGB2Gray(g_in) + g_out = cv.gapi.goodFeaturesToTrack(g_gray, max_corners, quality_lvl, + min_distance, mask, block_sz, use_harris_detector, k) + + c = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out)) + + ccomp = c.compileStreaming() + source = cv.gapi.wip.make_capture_src(path) + ccomp.setSource(source) + ccomp.start() + + # Assert + max_num_frames = 10 + proc_num_frames = 0 + while cap.isOpened(): + has_expected, frame = cap.read() + has_actual, actual = ccomp.pull() + + self.assertEqual(has_expected, has_actual) + + if not has_actual: + break + + # OpenCV + frame = cv.cvtColor(frame, cv.COLOR_RGB2GRAY) + expected = cv.goodFeaturesToTrack(frame, max_corners, quality_lvl, + min_distance, mask=mask, + blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k) + for e, a in zip(expected, actual): + # NB: OpenCV & G-API have different output shapes: + # OpenCV - (num_points, 1, 2) + # G-API - (num_points, 2) + self.assertEqual(0.0, cv.norm(e.flatten(), a.flatten(), cv.NORM_INF)) + + proc_num_frames += 1 + if proc_num_frames == max_num_frames: + break; + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests.hpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests.hpp index ac7182c07f2..4a88b4f59c1 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests.hpp +++ 
b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests.hpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. // -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #ifndef OPENCV_GAPI_CORE_PERF_TESTS_HPP @@ -52,6 +52,7 @@ namespace opencv_test class AbsDiffPerfTest : public TestPerfParams> {}; class AbsDiffCPerfTest : public TestPerfParams> {}; class SumPerfTest : public TestPerfParams> {}; + class CountNonZeroPerfTest : public TestPerfParams> {}; class AddWeightedPerfTest : public TestPerfParams> {}; class NormPerfTest : public TestPerfParams> {}; class IntegralPerfTest : public TestPerfParams> {}; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp index 9ad1d04d607..ac901811842 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #ifndef OPENCV_GAPI_CORE_PERF_TESTS_INL_HPP @@ -1011,6 +1011,44 @@ PERF_TEST_P_(SumPerfTest, TestPerformance) SANITY_CHECK_NOTHING(); } +//------------------------------------------------------------------------------ +#pragma push_macro("countNonZero") +#undef countNonZero +PERF_TEST_P_(CountNonZeroPerfTest, TestPerformance) +{ + compare_scalar_f cmpF; + cv::Size sz_in; + MatType type = -1; + cv::GCompileArgs compile_args; + std::tie(cmpF, sz_in, type, compile_args) = GetParam(); + + initMatrixRandU(type, sz_in, type, false); + int out_cnz_gapi, out_cnz_ocv; + + // OpenCV code /////////////////////////////////////////////////////////// + out_cnz_ocv = cv::countNonZero(in_mat1); + + // G-API code //////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::countNonZero(in); + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + + // Warm-up graph engine: + c.apply(cv::gin(in_mat1), cv::gout(out_cnz_gapi), std::move(compile_args)); + + TEST_CYCLE() + { + c.apply(cv::gin(in_mat1), cv::gout(out_cnz_gapi)); + } + + // Comparison //////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_cnz_gapi, out_cnz_ocv)); + } + + SANITY_CHECK_NOTHING(); +} +#pragma pop_macro("countNonZero") //------------------------------------------------------------------------------ PERF_TEST_P_(AddWeightedPerfTest, TestPerformance) @@ -2086,7 +2124,7 @@ PERF_TEST_P_(SizePerfTest, TestPerformance) // G-API code ////////////////////////////////////////////////////////////// cv::GMat in; - auto out = cv::gapi::size(in); + auto out = cv::gapi::streaming::size(in); cv::GComputation c(cv::GIn(in), cv::GOut(out)); cv::Size out_sz; @@ -2118,7 +2156,7 @@ PERF_TEST_P_(SizeRPerfTest, TestPerformance) // G-API code ////////////////////////////////////////////////////////////// cv::GOpaque op_rect; - auto out = cv::gapi::size(op_rect); + auto out = 
cv::gapi::streaming::size(op_rect); cv::GComputation c(cv::GIn(op_rect), cv::GOut(out)); cv::Size out_sz; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests.hpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests.hpp index b2591907cf9..c6efe8b8ac1 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests.hpp @@ -42,10 +42,15 @@ class GoodFeaturesPerfTest : public TestPerfParams> {}; class EqHistPerfTest : public TestPerfParams> {}; +class BGR2RGBPerfTest : public TestPerfParams> {}; class RGB2GrayPerfTest : public TestPerfParams> {}; class BGR2GrayPerfTest : public TestPerfParams> {}; class RGB2YUVPerfTest : public TestPerfParams> {}; class YUV2RGBPerfTest : public TestPerfParams> {}; +class BGR2I420PerfTest : public TestPerfParams> {}; +class RGB2I420PerfTest : public TestPerfParams> {}; +class I4202BGRPerfTest : public TestPerfParams> {}; +class I4202RGBPerfTest : public TestPerfParams> {}; class RGB2LabPerfTest : public TestPerfParams> {}; class BGR2LUVPerfTest : public TestPerfParams> {}; class LUV2BGRPerfTest : public TestPerfParams> {}; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests_inl.hpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests_inl.hpp index f71e435a2ba..3d002989889 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests_inl.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_imgproc_perf_tests_inl.hpp @@ -788,6 +788,44 @@ PERF_TEST_P_(EqHistPerfTest, TestPerformance) //------------------------------------------------------------------------------ +PERF_TEST_P_(BGR2RGBPerfTest, TestPerformance) +{ + compare_f cmpF; + cv::Size sz; + cv::GCompileArgs compile_args; + std::tie(cmpF, sz, compile_args) = 
GetParam(); + + initMatrixRandN(CV_8UC3, sz, CV_8UC3, false); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2RGB); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::BGR2RGB(in); + cv::GComputation c(in, out); + + // Warm-up graph engine: + c.apply(in_mat1, out_mat_gapi, std::move(compile_args)); + + TEST_CYCLE() + { + c.apply(in_mat1, out_mat_gapi); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), sz); + } + + SANITY_CHECK_NOTHING(); +} + +//------------------------------------------------------------------------------ + PERF_TEST_P_(RGB2GrayPerfTest, TestPerformance) { compare_f cmpF = get<0>(GetParam()); @@ -940,6 +978,158 @@ PERF_TEST_P_(YUV2RGBPerfTest, TestPerformance) //------------------------------------------------------------------------------ +PERF_TEST_P_(BGR2I420PerfTest, TestPerformance) +{ + compare_f cmpF; + cv::Size sz; + cv::GCompileArgs compile_args; + std::tie(cmpF, sz, compile_args) = GetParam(); + + initMatrixRandN(CV_8UC3, sz, CV_8UC1, false); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2YUV_I420); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::BGR2I420(in); + cv::GComputation c(in, out); + + // Warm-up graph engine: + c.apply(in_mat1, out_mat_gapi, std::move(compile_args)); + + TEST_CYCLE() + { + c.apply(in_mat1, out_mat_gapi); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2)); + } + + SANITY_CHECK_NOTHING(); +} + 
+//------------------------------------------------------------------------------ + +PERF_TEST_P_(RGB2I420PerfTest, TestPerformance) +{ + compare_f cmpF; + cv::Size sz; + cv::GCompileArgs compile_args; + std::tie(cmpF, sz, compile_args) = GetParam(); + + initMatrixRandN(CV_8UC3, sz, CV_8UC1, false); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_RGB2YUV_I420); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::RGB2I420(in); + cv::GComputation c(in, out); + + // Warm-up graph engine: + c.apply(in_mat1, out_mat_gapi, std::move(compile_args)); + + TEST_CYCLE() + { + c.apply(in_mat1, out_mat_gapi); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2)); + } + + SANITY_CHECK_NOTHING(); +} + +//------------------------------------------------------------------------------ + +PERF_TEST_P_(I4202BGRPerfTest, TestPerformance) +{ + compare_f cmpF; + cv::Size sz; + cv::GCompileArgs compile_args; + std::tie(cmpF, sz, compile_args) = GetParam(); + + initMatrixRandN(CV_8UC1, sz, CV_8UC3, false); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2BGR_I420); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::I4202BGR(in); + cv::GComputation c(in, out); + + // Warm-up graph engine: + c.apply(in_mat1, out_mat_gapi, std::move(compile_args)); + + TEST_CYCLE() + { + c.apply(in_mat1, out_mat_gapi); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3)); + } + + SANITY_CHECK_NOTHING(); 
+} + +//------------------------------------------------------------------------------ + +PERF_TEST_P_(I4202RGBPerfTest, TestPerformance) +{ + compare_f cmpF; + cv::Size sz; + cv::GCompileArgs compile_args; + std::tie(cmpF, sz, compile_args) = GetParam(); + + initMatrixRandN(CV_8UC1, sz, CV_8UC3, false); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2RGB_I420); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::I4202RGB(in); + cv::GComputation c(in, out); + + // Warm-up graph engine: + c.apply(in_mat1, out_mat_gapi, std::move(compile_args)); + + TEST_CYCLE() + { + c.apply(in_mat1, out_mat_gapi); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3)); + } + + SANITY_CHECK_NOTHING(); +} + +//------------------------------------------------------------------------------ + PERF_TEST_P_(RGB2LabPerfTest, TestPerformance) { compare_f cmpF = get<0>(GetParam()); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_video_perf_tests_inl.hpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_video_perf_tests_inl.hpp index d1c81a92453..189a09c6f24 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_video_perf_tests_inl.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/perf/common/gapi_video_perf_tests_inl.hpp @@ -44,7 +44,7 @@ PERF_TEST_P_(BuildOptFlowPyramidPerfTest, TestPerformance) outMaxLevelGAPI = static_cast(outMaxLevelSc[0]); // Comparison ////////////////////////////////////////////////////////////// - compareOutputPyramids(outOCV, outGAPI); + compareOutputPyramids(outGAPI, outOCV); SANITY_CHECK_NOTHING(); } @@ -74,7 +74,7 @@ PERF_TEST_P_(OptFlowLKPerfTest, TestPerformance) } // Comparison 
////////////////////////////////////////////////////////////// - compareOutputsOptFlow(outOCV, outGAPI); + compareOutputsOptFlow(outGAPI, outOCV); SANITY_CHECK_NOTHING(); } @@ -109,7 +109,7 @@ PERF_TEST_P_(OptFlowLKForPyrPerfTest, TestPerformance) } // Comparison ////////////////////////////////////////////////////////////// - compareOutputsOptFlow(outOCV, outGAPI); + compareOutputsOptFlow(outGAPI, outOCV); SANITY_CHECK_NOTHING(); } @@ -147,7 +147,7 @@ PERF_TEST_P_(BuildPyr_CalcOptFlow_PipelinePerfTest, TestPerformance) } // Comparison ////////////////////////////////////////////////////////////// - compareOutputsOptFlow(outOCV, outGAPI); + compareOutputsOptFlow(outGAPI, outOCV); SANITY_CHECK_NOTHING(); } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_core_perf_tests_cpu.cpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_core_perf_tests_cpu.cpp index d4319f24cfe..ffc4b1a6463 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_core_perf_tests_cpu.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_core_perf_tests_cpu.cpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #include "../perf_precomp.hpp" @@ -160,6 +160,12 @@ INSTANTIATE_TEST_CASE_P(SumPerfTestCPU, SumPerfTest, //Values(0.0), Values(cv::compile_args(CORE_CPU)))); +INSTANTIATE_TEST_CASE_P(CountNonZeroPerfTestCPU, CountNonZeroPerfTest, + Combine(Values(AbsToleranceScalar(0.0).to_compare_f()), + Values(szSmall128, szVGA, sz720p, sz1080p), + Values(CV_8UC1, CV_16UC1, CV_16SC1, CV_32FC1), + Values(cv::compile_args(CORE_CPU)))); + INSTANTIATE_TEST_CASE_P(AddWeightedPerfTestCPU, AddWeightedPerfTest, Combine(Values(Tolerance_FloatRel_IntAbs(1e-6, 1).to_compare_f()), Values(szSmall128, szVGA, sz720p, sz1080p), diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_imgproc_perf_tests_cpu.cpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_imgproc_perf_tests_cpu.cpp index 4de1b183089..0548f6c277c 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_imgproc_perf_tests_cpu.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/perf/cpu/gapi_imgproc_perf_tests_cpu.cpp @@ -179,6 +179,11 @@ INSTANTIATE_TEST_CASE_P(EqHistPerfTestCPU, EqHistPerfTest, Values(szVGA, sz720p, sz1080p), Values(cv::compile_args(IMGPROC_CPU)))); +INSTANTIATE_TEST_CASE_P(BGR2RGBPerfTestCPU, BGR2RGBPerfTest, + Combine(Values(AbsExact().to_compare_f()), + Values(szVGA, sz720p, sz1080p), + Values(cv::compile_args(IMGPROC_CPU)))); + INSTANTIATE_TEST_CASE_P(RGB2GrayPerfTestCPU, RGB2GrayPerfTest, Combine(Values(AbsExact().to_compare_f()), Values(szVGA, sz720p, sz1080p), @@ -199,6 +204,26 @@ INSTANTIATE_TEST_CASE_P(YUV2RGBPerfTestCPU, YUV2RGBPerfTest, Values(szVGA, sz720p, sz1080p), Values(cv::compile_args(IMGPROC_CPU)))); +INSTANTIATE_TEST_CASE_P(BGR2I420PerfTestCPU, BGR2I420PerfTest, + Combine(Values(AbsExact().to_compare_f()), + Values(szVGA, sz720p, sz1080p), + Values(cv::compile_args(IMGPROC_CPU)))); + +INSTANTIATE_TEST_CASE_P(RGB2I420PerfTestCPU, RGB2I420PerfTest, + 
Combine(Values(AbsExact().to_compare_f()), + Values(szVGA, sz720p, sz1080p), + Values(cv::compile_args(IMGPROC_CPU)))); + +INSTANTIATE_TEST_CASE_P(I4202BGRPerfTestCPU, I4202BGRPerfTest, + Combine(Values(AbsExact().to_compare_f()), + Values(szVGA, sz720p, sz1080p), + Values(cv::compile_args(IMGPROC_CPU)))); + +INSTANTIATE_TEST_CASE_P(I4202RGBPerfTestCPU, I4202RGBPerfTest, + Combine(Values(AbsExact().to_compare_f()), + Values(szVGA, sz720p, sz1080p), + Values(cv::compile_args(IMGPROC_CPU)))); + INSTANTIATE_TEST_CASE_P(RGB2LabPerfTestCPU, RGB2LabPerfTest, Combine(Values(AbsExact().to_compare_f()), Values(szVGA, sz720p, sz1080p), diff --git a/inference-engine/thirdparty/fluid/modules/gapi/perf/gpu/gapi_core_perf_tests_gpu.cpp b/inference-engine/thirdparty/fluid/modules/gapi/perf/gpu/gapi_core_perf_tests_gpu.cpp index 0c5ff9e044a..2d8c254cd21 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/perf/gpu/gapi_core_perf_tests_gpu.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/perf/gpu/gapi_core_perf_tests_gpu.cpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #include "../perf_precomp.hpp" @@ -157,6 +157,12 @@ INSTANTIATE_TEST_CASE_P(SumPerfTestGPU, SumPerfTest, Values( CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1 ), Values(cv::compile_args(CORE_GPU)))); +INSTANTIATE_TEST_CASE_P(CountNonZeroPerfTestGPU, CountNonZeroPerfTest, + Combine(Values(AbsToleranceScalar(0.0).to_compare_f()), + Values(szSmall128, szVGA, sz720p, sz1080p), + Values(CV_8UC1, CV_16UC1, CV_16SC1, CV_32FC1), + Values(cv::compile_args(CORE_GPU)))); + INSTANTIATE_TEST_CASE_P(AddWeightedPerfTestGPU, AddWeightedPerfTest, Combine(Values(Tolerance_FloatRel_IntAbs(1e-6, 1).to_compare_f()), Values( szSmall128, szVGA, sz720p, sz1080p ), diff --git a/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_ie_onnx_hybrid.cpp b/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_ie_onnx_hybrid.cpp new file mode 100644 index 00000000000..b8612a25cac --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_ie_onnx_hybrid.cpp @@ -0,0 +1,195 @@ +#include +#include + +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" + +#include "opencv2/gapi.hpp" +#include "opencv2/gapi/core.hpp" +#include "opencv2/gapi/imgproc.hpp" +#include "opencv2/gapi/infer.hpp" +#include "opencv2/gapi/infer/ie.hpp" +#include "opencv2/gapi/infer/onnx.hpp" +#include "opencv2/gapi/cpu/gcpukernel.hpp" +#include "opencv2/gapi/streaming/cap.hpp" + +namespace { +const std::string keys = + "{ h help | | print this help message }" + "{ input | | Path to an input video file }" + "{ fdm | | IE face detection model IR }" + "{ fdw | | IE face detection model weights }" + "{ fdd | | IE face detection device }" + "{ emom | | ONNX emotions recognition model }" + "{ output | | (Optional) Path to an output video file }" + ; +} // namespace + +namespace custom { +G_API_NET(Faces, , "face-detector"); +G_API_NET(Emotions, , "emotions-recognition"); + +G_API_OP(PostProc, 
(cv::GMat, cv::GMat)>, "custom.fd_postproc") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GMatDesc &) { + return cv::empty_array_desc(); + } +}; + +GAPI_OCV_KERNEL(OCVPostProc, PostProc) { + static void run(const cv::Mat &in_ssd_result, + const cv::Mat &in_frame, + std::vector &out_faces) { + const int MAX_PROPOSALS = 200; + const int OBJECT_SIZE = 7; + const cv::Size upscale = in_frame.size(); + const cv::Rect surface({0,0}, upscale); + + out_faces.clear(); + + const float *data = in_ssd_result.ptr(); + for (int i = 0; i < MAX_PROPOSALS; i++) { + const float image_id = data[i * OBJECT_SIZE + 0]; // batch id + const float confidence = data[i * OBJECT_SIZE + 2]; + const float rc_left = data[i * OBJECT_SIZE + 3]; + const float rc_top = data[i * OBJECT_SIZE + 4]; + const float rc_right = data[i * OBJECT_SIZE + 5]; + const float rc_bottom = data[i * OBJECT_SIZE + 6]; + + if (image_id < 0.f) { // indicates end of detections + break; + } + if (confidence < 0.5f) { + continue; + } + + cv::Rect rc; + rc.x = static_cast(rc_left * upscale.width); + rc.y = static_cast(rc_top * upscale.height); + rc.width = static_cast(rc_right * upscale.width) - rc.x; + rc.height = static_cast(rc_bottom * upscale.height) - rc.y; + out_faces.push_back(rc & surface); + } + } +}; +//! 
[Postproc] + +} // namespace custom + +namespace labels { +// Labels as defined in +// https://github.com/onnx/models/tree/master/vision/body_analysis/emotion_ferplus +// +const std::string emotions[] = { + "neutral", "happiness", "surprise", "sadness", "anger", "disgust", "fear", "contempt" +}; +namespace { +template +std::vector softmax(Iter begin, Iter end) { + std::vector prob(end - begin, 0.f); + std::transform(begin, end, prob.begin(), [](float x) { return std::exp(x); }); + float sum = std::accumulate(prob.begin(), prob.end(), 0.0f); + for (int i = 0; i < static_cast(prob.size()); i++) + prob[i] /= sum; + return prob; +} + +void DrawResults(cv::Mat &frame, + const std::vector &faces, + const std::vector &out_emotions) { + CV_Assert(faces.size() == out_emotions.size()); + + for (auto it = faces.begin(); it != faces.end(); ++it) { + const auto idx = std::distance(faces.begin(), it); + const auto &rc = *it; + + const float *emotions_data = out_emotions[idx].ptr(); + auto sm = softmax(emotions_data, emotions_data + 8); + const auto emo_id = std::max_element(sm.begin(), sm.end()) - sm.begin(); + + const int ATTRIB_OFFSET = 15; + cv::rectangle(frame, rc, {0, 255, 0}, 4); + cv::putText(frame, emotions[emo_id], + cv::Point(rc.x, rc.y - ATTRIB_OFFSET), + cv::FONT_HERSHEY_COMPLEX_SMALL, + 1, + cv::Scalar(0, 0, 255)); + + std::cout << emotions[emo_id] << " at " << rc << std::endl; + } +} +} // anonymous namespace +} // namespace labels + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + const std::string input = cmd.get("input"); + const std::string output = cmd.get("output"); + + // OpenVINO FD parameters here + auto det_net = cv::gapi::ie::Params { + cmd.get("fdm"), // read cmd args: path to topology IR + cmd.get("fdw"), // read cmd args: path to weights + cmd.get("fdd"), // read cmd args: device specifier + }; + + // ONNX Emotions parameters here + auto emo_net = 
cv::gapi::onnx::Params { + cmd.get("emom"), // read cmd args: path to the ONNX model + }.cfgNormalize({false}); // model accepts 0..255 range in FP32 + + auto kernels = cv::gapi::kernels(); + auto networks = cv::gapi::networks(det_net, emo_net); + + cv::GMat in; + cv::GMat bgr = cv::gapi::copy(in); + cv::GMat frame = cv::gapi::streaming::desync(bgr); + cv::GMat detections = cv::gapi::infer(frame); + cv::GArray faces = custom::PostProc::on(detections, frame); + cv::GArray emotions = cv::gapi::infer(faces, frame); + auto pipeline = cv::GComputation(cv::GIn(in), cv::GOut(bgr, faces, emotions)) + .compileStreaming(cv::compile_args(kernels, networks)); + + auto in_src = cv::gapi::wip::make_src(input); + pipeline.setSource(cv::gin(in_src)); + pipeline.start(); + + cv::util::optional out_frame; + cv::util::optional> out_faces; + cv::util::optional> out_emotions; + + cv::Mat last_mat; + std::vector last_faces; + std::vector last_emotions; + + cv::VideoWriter writer; + + while (pipeline.pull(cv::gout(out_frame, out_faces, out_emotions))) { + if (out_faces && out_emotions) { + last_faces = *out_faces; + last_emotions = *out_emotions; + } + if (out_frame) { + last_mat = *out_frame; + labels::DrawResults(last_mat, last_faces, last_emotions); + + if (!output.empty()) { + if (!writer.isOpened()) { + const auto sz = cv::Size{last_mat.cols, last_mat.rows}; + writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz); + CV_Assert(writer.isOpened()); + } + writer << last_mat; + } + } + if (!last_mat.empty()) { + cv::imshow("Out", last_mat); + cv::waitKey(1); + } + } + return 0; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_single_roi.cpp b/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_single_roi.cpp new file mode 100644 index 00000000000..6054a3f4a62 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_single_roi.cpp @@ -0,0 +1,264 @@ +#include +#include +#include + +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include + +const std::string keys = + "{ h help | | Print this help message }" + "{ input | | Path to the input video file }" + "{ facem | face-detection-adas-0001.xml | Path to OpenVINO IE face detection model (.xml) }" + "{ faced | CPU | Target device for face detection model (e.g. CPU, GPU, VPU, ...) }" + "{ r roi | -1,-1,-1,-1 | Region of interest (ROI) to use for inference. Identified automatically when not set }"; + +namespace { + +std::string weights_path(const std::string &model_path) { + const auto EXT_LEN = 4u; + const auto sz = model_path.size(); + CV_Assert(sz > EXT_LEN); + + auto ext = model_path.substr(sz - EXT_LEN); + std::transform(ext.begin(), ext.end(), ext.begin(), [](unsigned char c){ + return static_cast(std::tolower(c)); + }); + CV_Assert(ext == ".xml"); + return model_path.substr(0u, sz - EXT_LEN) + ".bin"; +} + +cv::util::optional parse_roi(const std::string &rc) { + cv::Rect rv; + char delim[3]; + + std::stringstream is(rc); + is >> rv.x >> delim[0] >> rv.y >> delim[1] >> rv.width >> delim[2] >> rv.height; + if (is.bad()) { + return cv::util::optional(); // empty value + } + const auto is_delim = [](char c) { + return c == ','; + }; + if (!std::all_of(std::begin(delim), std::end(delim), is_delim)) { + return cv::util::optional(); // empty value + + } + if (rv.x < 0 || rv.y < 0 || rv.width <= 0 || rv.height <= 0) { + return cv::util::optional(); // empty value + } + return cv::util::make_optional(std::move(rv)); +} + +} // namespace + +namespace custom { + +G_API_NET(FaceDetector, , "face-detector"); + +using GDetections = cv::GArray; +using GRect = cv::GOpaque; +using GSize = cv::GOpaque; +using GPrims = cv::GArray; + +G_API_OP(GetSize, , "sample.custom.get-size") { + static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) { + return cv::empty_gopaque_desc(); + } +}; + +G_API_OP(LocateROI, , "sample.custom.locate-roi") { + static cv::GOpaqueDesc outMeta(const cv::GMatDesc 
&) { + return cv::empty_gopaque_desc(); + } +}; + +G_API_OP(ParseSSD, , "sample.custom.parse-ssd") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &, const cv::GOpaqueDesc &) { + return cv::empty_array_desc(); + } +}; + +G_API_OP(BBoxes, , "sample.custom.b-boxes") { + static cv::GArrayDesc outMeta(const cv::GArrayDesc &, const cv::GOpaqueDesc &) { + return cv::empty_array_desc(); + } +}; + +GAPI_OCV_KERNEL(OCVGetSize, GetSize) { + static void run(const cv::Mat &in, cv::Size &out) { + out = {in.cols, in.rows}; + } +}; + +GAPI_OCV_KERNEL(OCVLocateROI, LocateROI) { + // This is the place where we can run extra analytics + // on the input image frame and select the ROI (region + // of interest) where we want to detect our objects (or + // run any other inference). + // + // Currently it doesn't do anything intelligent, + // but only crops the input image to square (this is + // the most convenient aspect ratio for detectors to use) + + static void run(const cv::Mat &in_mat, cv::Rect &out_rect) { + + // Identify the central point & square size (- some padding) + const auto center = cv::Point{in_mat.cols/2, in_mat.rows/2}; + auto sqside = std::min(in_mat.cols, in_mat.rows); + + // Now build the central square ROI + out_rect = cv::Rect{ center.x - sqside/2 + , center.y - sqside/2 + , sqside + , sqside + }; + } +}; + +GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) { + static void run(const cv::Mat &in_ssd_result, + const cv::Rect &in_roi, + const cv::Size &in_parent_size, + std::vector &out_objects) { + const auto &in_ssd_dims = in_ssd_result.size; + CV_Assert(in_ssd_dims.dims() == 4u); + + const int MAX_PROPOSALS = in_ssd_dims[2]; + const int OBJECT_SIZE = in_ssd_dims[3]; + CV_Assert(OBJECT_SIZE == 7); // fixed SSD object size + + const cv::Size up_roi = in_roi.size(); + const cv::Rect surface({0,0}, in_parent_size); + + out_objects.clear(); + + const float *data = in_ssd_result.ptr(); + for (int i = 0; i < MAX_PROPOSALS; i++) { + const float 
image_id = data[i * OBJECT_SIZE + 0]; + const float label = data[i * OBJECT_SIZE + 1]; + const float confidence = data[i * OBJECT_SIZE + 2]; + const float rc_left = data[i * OBJECT_SIZE + 3]; + const float rc_top = data[i * OBJECT_SIZE + 4]; + const float rc_right = data[i * OBJECT_SIZE + 5]; + const float rc_bottom = data[i * OBJECT_SIZE + 6]; + (void) label; // unused + + if (image_id < 0.f) { + break; // marks end-of-detections + } + if (confidence < 0.5f) { + continue; // skip objects with low confidence + } + + // map relative coordinates to the original image scale + // taking the ROI into account + cv::Rect rc; + rc.x = static_cast(rc_left * up_roi.width); + rc.y = static_cast(rc_top * up_roi.height); + rc.width = static_cast(rc_right * up_roi.width) - rc.x; + rc.height = static_cast(rc_bottom * up_roi.height) - rc.y; + rc.x += in_roi.x; + rc.y += in_roi.y; + out_objects.emplace_back(rc & surface); + } + } +}; + +GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) { + // This kernel converts the rectangles into G-API's + // rendering primitives + static void run(const std::vector &in_face_rcs, + const cv::Rect &in_roi, + std::vector &out_prims) { + out_prims.clear(); + const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) { + return cv::gapi::wip::draw::Rect(rc, clr, 2); + }; + out_prims.emplace_back(cvt(in_roi, CV_RGB(0,255,255))); // cyan + for (auto &&rc : in_face_rcs) { + out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0))); // green + } + } +}; + +} // namespace custom + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + + // Prepare parameters first + const std::string input = cmd.get("input"); + const auto opt_roi = parse_roi(cmd.get("roi")); + + const auto face_model_path = cmd.get("facem"); + auto face_net = cv::gapi::ie::Params { + face_model_path, // path to topology IR + weights_path(face_model_path), // path to weights + cmd.get("faced"), // device specifier + 
}; + auto kernels = cv::gapi::kernels + < custom::OCVGetSize + , custom::OCVLocateROI + , custom::OCVParseSSD + , custom::OCVBBoxes>(); + auto networks = cv::gapi::networks(face_net); + + // Now build the graph. The graph structure may vary + // pased on the input parameters + cv::GStreamingCompiled pipeline; + auto inputs = cv::gin(cv::gapi::wip::make_src(input)); + + if (opt_roi.has_value()) { + // Use the value provided by user + std::cout << "Will run inference for static region " + << opt_roi.value() + << " only" + << std::endl; + cv::GMat in; + cv::GOpaque in_roi; + auto blob = cv::gapi::infer(in_roi, in); + auto rcs = custom::ParseSSD::on(blob, in_roi, custom::GetSize::on(in)); + auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, in_roi)); + pipeline = cv::GComputation(cv::GIn(in, in_roi), cv::GOut(out)) + .compileStreaming(cv::compile_args(kernels, networks)); + + // Since the ROI to detect is manual, make it part of the input vector + inputs.push_back(cv::gin(opt_roi.value())[0]); + } else { + // Automatically detect ROI to infer. Make it output parameter + std::cout << "ROI is not set or invalid. 
Locating it automatically" + << std::endl; + cv::GMat in; + cv::GOpaque roi = custom::LocateROI::on(in); + auto blob = cv::gapi::infer(roi, in); + auto rcs = custom::ParseSSD::on(blob, roi, custom::GetSize::on(in)); + auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, roi)); + pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out)) + .compileStreaming(cv::compile_args(kernels, networks)); + } + + // The execution part + pipeline.setSource(std::move(inputs)); + pipeline.start(); + + cv::Mat out; + while (pipeline.pull(cv::gout(out))) { + cv::imshow("Out", out); + cv::waitKey(1); + } + return 0; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_ssd_onnx.cpp b/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_ssd_onnx.cpp new file mode 100644 index 00000000000..fc26ca1e36f --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/samples/infer_ssd_onnx.cpp @@ -0,0 +1,213 @@ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace custom { + +G_API_NET(ObjDetector, , "object-detector"); + +using GDetections = cv::GArray; +using GSize = cv::GOpaque; +using GPrims = cv::GArray; + +G_API_OP(GetSize, , "sample.custom.get-size") { + static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) { + return cv::empty_gopaque_desc(); + } +}; +G_API_OP(ParseSSD, , "sample.custom.parse-ssd") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &) { + return cv::empty_array_desc(); + } +}; +G_API_OP(BBoxes, , "sample.custom.b-boxes") { + static cv::GArrayDesc outMeta(const cv::GArrayDesc &) { + return cv::empty_array_desc(); + } +}; + +GAPI_OCV_KERNEL(OCVGetSize, GetSize) { + static void run(const cv::Mat &in, cv::Size &out) { + out = {in.cols, in.rows}; + } +}; +GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) { + static void run(const cv::Mat &in_ssd_result, + const cv::Size &in_parent_size, + std::vector 
&out_objects) { + const auto &in_ssd_dims = in_ssd_result.size; + CV_Assert(in_ssd_dims.dims() == 4u); + + const int MAX_PROPOSALS = in_ssd_dims[2]; + const int OBJECT_SIZE = in_ssd_dims[3]; + + CV_Assert(OBJECT_SIZE == 7); // fixed SSD object size + + const cv::Rect surface({0,0}, in_parent_size); + + out_objects.clear(); + + const float *data = in_ssd_result.ptr(); + for (int i = 0; i < MAX_PROPOSALS; i++) { + const float image_id = data[i * OBJECT_SIZE + 0]; + const float label = data[i * OBJECT_SIZE + 1]; + const float confidence = data[i * OBJECT_SIZE + 2]; + const float rc_left = data[i * OBJECT_SIZE + 3]; + const float rc_top = data[i * OBJECT_SIZE + 4]; + const float rc_right = data[i * OBJECT_SIZE + 5]; + const float rc_bottom = data[i * OBJECT_SIZE + 6]; + (void) label; // unused + + if (image_id < 0.f) { + break; // marks end-of-detections + } + if (confidence < 0.5f) { + continue; // skip objects with low confidence + } + + // map relative coordinates to the original image scale + cv::Rect rc; + rc.x = static_cast(rc_left * in_parent_size.width); + rc.y = static_cast(rc_top * in_parent_size.height); + rc.width = static_cast(rc_right * in_parent_size.width) - rc.x; + rc.height = static_cast(rc_bottom * in_parent_size.height) - rc.y; + out_objects.emplace_back(rc & surface); + } + } +}; +GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) { + // This kernel converts the rectangles into G-API's + // rendering primitives + static void run(const std::vector &in_obj_rcs, + std::vector &out_prims) { + out_prims.clear(); + const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) { + return cv::gapi::wip::draw::Rect(rc, clr, 2); + }; + for (auto &&rc : in_obj_rcs) { + out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0))); // green + } + + std::cout << "Detections:"; + for (auto &&rc : in_obj_rcs) std::cout << ' ' << rc; + std::cout << std::endl; + } +}; + +} // namespace custom + +namespace { +void remap_ssd_ports(const std::unordered_map &onnx, + std::unordered_map &gapi) { + 
// Assemble ONNX-processed outputs back to a single 1x1x200x7 blob + // to preserve compatibility with OpenVINO-based SSD pipeline + const cv::Mat &num_detections = onnx.at("num_detections:0"); + const cv::Mat &detection_boxes = onnx.at("detection_boxes:0"); + const cv::Mat &detection_scores = onnx.at("detection_scores:0"); + const cv::Mat &detection_classes = onnx.at("detection_classes:0"); + + GAPI_Assert(num_detections.depth() == CV_32F); + GAPI_Assert(detection_boxes.depth() == CV_32F); + GAPI_Assert(detection_scores.depth() == CV_32F); + GAPI_Assert(detection_classes.depth() == CV_32F); + + cv::Mat &ssd_output = gapi.at("detection_output"); + + const int num_objects = static_cast(num_detections.ptr()[0]); + const float *in_boxes = detection_boxes.ptr(); + const float *in_scores = detection_scores.ptr(); + const float *in_classes = detection_classes.ptr(); + float *ptr = ssd_output.ptr(); + + for (int i = 0; i < num_objects; i++) { + ptr[0] = 0.f; // "image_id" + ptr[1] = in_classes[i]; // "label" + ptr[2] = in_scores[i]; // "confidence" + ptr[3] = in_boxes[4*i + 1]; // left + ptr[4] = in_boxes[4*i + 0]; // top + ptr[5] = in_boxes[4*i + 3]; // right + ptr[6] = in_boxes[4*i + 2]; // bottom + + ptr += 7; + in_boxes += 4; + } + if (num_objects < ssd_output.size[2]-1) { + // put a -1 mark at the end of output blob if there is space left + ptr[0] = -1.f; + } +} +} // anonymous namespace + + +const std::string keys = + "{ h help | | Print this help message }" + "{ input | | Path to the input video file }" + "{ output | | (Optional) path to output video file }" + "{ detm | | Path to an ONNX SSD object detection model (.onnx) }" + ; + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + + // Prepare parameters first + const std::string input = cmd.get("input"); + const std::string output = cmd.get("output"); + const auto obj_model_path = cmd.get("detm"); + + auto obj_net = 
cv::gapi::onnx::Params{obj_model_path} + .cfgOutputLayers({"detection_output"}) + .cfgPostProc({cv::GMatDesc{CV_32F, {1,1,200,7}}}, remap_ssd_ports); + auto kernels = cv::gapi::kernels< custom::OCVGetSize + , custom::OCVParseSSD + , custom::OCVBBoxes>(); + auto networks = cv::gapi::networks(obj_net); + + // Now build the graph + cv::GMat in; + auto blob = cv::gapi::infer(in); + auto rcs = custom::ParseSSD::on(blob, custom::GetSize::on(in)); + auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs)); + cv::GStreamingCompiled pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out)) + .compileStreaming(cv::compile_args(kernels, networks)); + + auto inputs = cv::gin(cv::gapi::wip::make_src(input)); + + // The execution part + pipeline.setSource(std::move(inputs)); + pipeline.start(); + + cv::VideoWriter writer; + + cv::Mat outMat; + while (pipeline.pull(cv::gout(outMat))) { + cv::imshow("Out", outMat); + cv::waitKey(1); + if (!output.empty()) { + if (!writer.isOpened()) { + const auto sz = cv::Size{outMat.cols, outMat.rows}; + writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz); + CV_Assert(writer.isOpened()); + } + writer << outMat; + } + } + return 0; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/samples/slides_blur_gapi.cpp b/inference-engine/thirdparty/fluid/modules/gapi/samples/slides_blur_gapi.cpp new file mode 100644 index 00000000000..53246205b71 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/samples/slides_blur_gapi.cpp @@ -0,0 +1,19 @@ +#include // G-API framework header +#include // cv::gapi::blur() +#include // cv::imread/imwrite + +int main(int argc, char *argv[]) { + if (argc < 3) return 1; + + cv::GMat in; // Express the graph: + cv::GMat out = cv::gapi::blur(in, cv::Size(3,3)); // `out` is a result of `blur` of `in` + + cv::Mat in_mat = cv::imread(argv[1]); // Get the real data + cv::Mat out_mat; // Output buffer (may be empty) + + cv::GComputation(cv::GIn(in), cv::GOut(out)) // 
Declare a graph from `in` to `out` + .apply(cv::gin(in_mat), cv::gout(out_mat)); // ...and run it immediately + + cv::imwrite(argv[2], out_mat); // Save the result + return 0; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/samples/text_detection.cpp b/inference-engine/thirdparty/fluid/modules/gapi/samples/text_detection.cpp new file mode 100644 index 00000000000..da1bab6ca97 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/samples/text_detection.cpp @@ -0,0 +1,698 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +const std::string about = + "This is an OpenCV-based version of OMZ Text Detection example"; +const std::string keys = + "{ h help | | Print this help message }" + "{ input | | Path to the input video file }" + "{ tdm | text-detection-0004.xml | Path to OpenVINO text detection model (.xml), versions 0003 and 0004 work }" + "{ tdd | CPU | Target device for the text detector (e.g. CPU, GPU, VPU, ...) }" + "{ trm | text-recognition-0012.xml | Path to OpenVINO text recognition model (.xml) }" + "{ trd | CPU | Target device for the text recognition (e.g. CPU, GPU, VPU, ...) }" + "{ bw | 0 | CTC beam search decoder bandwidth, if 0, a CTC greedy decoder is used}" + "{ sset | 0123456789abcdefghijklmnopqrstuvwxyz | Symbol set to use with text recognition decoder. Shouldn't contain symbol #. 
}" + "{ thr | 0.2 | Text recognition confidence threshold}" + ; + +namespace { +std::string weights_path(const std::string &model_path) { + const auto EXT_LEN = 4u; + const auto sz = model_path.size(); + CV_Assert(sz > EXT_LEN); + + const auto ext = model_path.substr(sz - EXT_LEN); + CV_Assert(cv::toLowerCase(ext) == ".xml"); + return model_path.substr(0u, sz - EXT_LEN) + ".bin"; +} + +////////////////////////////////////////////////////////////////////// +// Taken from OMZ samples as-is +template +void softmax_and_choose(Iter begin, Iter end, int *argmax, float *prob) { + auto max_element = std::max_element(begin, end); + *argmax = static_cast(std::distance(begin, max_element)); + float max_val = *max_element; + double sum = 0; + for (auto i = begin; i != end; i++) { + sum += std::exp((*i) - max_val); + } + if (std::fabs(sum) < std::numeric_limits::epsilon()) { + throw std::logic_error("sum can't be equal to zero"); + } + *prob = 1.0f / static_cast(sum); +} + +template +std::vector softmax(Iter begin, Iter end) { + std::vector prob(end - begin, 0.f); + std::transform(begin, end, prob.begin(), [](float x) { return std::exp(x); }); + float sum = std::accumulate(prob.begin(), prob.end(), 0.0f); + for (int i = 0; i < static_cast(prob.size()); i++) + prob[i] /= sum; + return prob; +} + +struct BeamElement { + std::vector sentence; //!< The sequence of chars that will be a result of the beam element + + float prob_blank; //!< The probability that the last char in CTC sequence + //!< for the beam element is the special blank char + + float prob_not_blank; //!< The probability that the last char in CTC sequence + //!< for the beam element is NOT the special blank char + + float prob() const { //!< The probability of the beam element. 
+ return prob_blank + prob_not_blank; + } +}; + +std::string CTCGreedyDecoder(const float *data, + const std::size_t sz, + const std::string &alphabet, + const char pad_symbol, + double *conf) { + std::string res = ""; + bool prev_pad = false; + *conf = 1; + + const auto num_classes = alphabet.length(); + for (auto it = data; it != (data+sz); it += num_classes) { + int argmax = 0; + float prob = 0.f; + + softmax_and_choose(it, it + num_classes, &argmax, &prob); + (*conf) *= prob; + + auto symbol = alphabet[argmax]; + if (symbol != pad_symbol) { + if (res.empty() || prev_pad || (!res.empty() && symbol != res.back())) { + prev_pad = false; + res += symbol; + } + } else { + prev_pad = true; + } + } + return res; +} + +std::string CTCBeamSearchDecoder(const float *data, + const std::size_t sz, + const std::string &alphabet, + double *conf, + int bandwidth) { + const auto num_classes = alphabet.length(); + + std::vector curr; + std::vector last; + + last.push_back(BeamElement{std::vector(), 1.f, 0.f}); + + for (auto it = data; it != (data+sz); it += num_classes) { + curr.clear(); + + std::vector prob = softmax(it, it + num_classes); + + for(const auto& candidate: last) { + float prob_not_blank = 0.f; + const std::vector& candidate_sentence = candidate.sentence; + if (!candidate_sentence.empty()) { + int n = candidate_sentence.back(); + prob_not_blank = candidate.prob_not_blank * prob[n]; + } + float prob_blank = candidate.prob() * prob[num_classes - 1]; + + auto check_res = std::find_if(curr.begin(), + curr.end(), + [&candidate_sentence](const BeamElement& n) { + return n.sentence == candidate_sentence; + }); + if (check_res == std::end(curr)) { + curr.push_back(BeamElement{candidate.sentence, prob_blank, prob_not_blank}); + } else { + check_res->prob_not_blank += prob_not_blank; + if (check_res->prob_blank != 0.f) { + throw std::logic_error("Probability that the last char in CTC-sequence " + "is the special blank char must be zero here"); + } + check_res->prob_blank = 
prob_blank; + } + + for (int i = 0; i < static_cast(num_classes) - 1; i++) { + auto extend = candidate_sentence; + extend.push_back(i); + + if (candidate_sentence.size() > 0 && candidate.sentence.back() == i) { + prob_not_blank = prob[i] * candidate.prob_blank; + } else { + prob_not_blank = prob[i] * candidate.prob(); + } + + auto check_res2 = std::find_if(curr.begin(), + curr.end(), + [&extend](const BeamElement &n) { + return n.sentence == extend; + }); + if (check_res2 == std::end(curr)) { + curr.push_back(BeamElement{extend, 0.f, prob_not_blank}); + } else { + check_res2->prob_not_blank += prob_not_blank; + } + } + } + + sort(curr.begin(), curr.end(), [](const BeamElement &a, const BeamElement &b) -> bool { + return a.prob() > b.prob(); + }); + + last.clear(); + int num_to_copy = std::min(bandwidth, static_cast(curr.size())); + for (int b = 0; b < num_to_copy; b++) { + last.push_back(curr[b]); + } + } + + *conf = last[0].prob(); + std::string res=""; + for (const auto& idx: last[0].sentence) { + res += alphabet[idx]; + } + + return res; +} + +////////////////////////////////////////////////////////////////////// +} // anonymous namespace + +namespace custom { +namespace { + +////////////////////////////////////////////////////////////////////// +// Define networks for this sample +using GMat2 = std::tuple; +G_API_NET(TextDetection, + , + "sample.custom.text_detect"); + +G_API_NET(TextRecognition, + , + "sample.custom.text_recogn"); + +// Define custom operations +using GSize = cv::GOpaque; +using GRRects = cv::GArray; +G_API_OP(PostProcess, + , + "sample.custom.text.post_proc") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, + const cv::GMatDesc &, + const cv::GOpaqueDesc &, + float, + float) { + return cv::empty_array_desc(); + } +}; + +using GMats = cv::GArray; +G_API_OP(CropLabels, + , + "sample.custom.text.crop") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, + const cv::GArrayDesc &, + const cv::GOpaqueDesc &) { + return 
cv::empty_array_desc(); + } +}; + +////////////////////////////////////////////////////////////////////// +// Implement custom operations +GAPI_OCV_KERNEL(OCVPostProcess, PostProcess) { + static void run(const cv::Mat &link, + const cv::Mat &segm, + const cv::Size &img_size, + const float link_threshold, + const float segm_threshold, + std::vector &out) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const int kMinArea = 300; + const int kMinHeight = 10; + + const float *link_data_pointer = link.ptr(); + std::vector link_data(link_data_pointer, link_data_pointer + link.total()); + link_data = transpose4d(link_data, dimsToShape(link.size), {0, 2, 3, 1}); + softmax(link_data); + link_data = sliceAndGetSecondChannel(link_data); + std::vector new_link_data_shape = { + link.size[0], + link.size[2], + link.size[3], + link.size[1]/2, + }; + + const float *cls_data_pointer = segm.ptr(); + std::vector cls_data(cls_data_pointer, cls_data_pointer + segm.total()); + cls_data = transpose4d(cls_data, dimsToShape(segm.size), {0, 2, 3, 1}); + softmax(cls_data); + cls_data = sliceAndGetSecondChannel(cls_data); + std::vector new_cls_data_shape = { + segm.size[0], + segm.size[2], + segm.size[3], + segm.size[1]/2, + }; + + out = maskToBoxes(decodeImageByJoin(cls_data, new_cls_data_shape, + link_data, new_link_data_shape, + segm_threshold, link_threshold), + static_cast(kMinArea), + static_cast(kMinHeight), + img_size); + } + + static std::vector dimsToShape(const cv::MatSize &sz) { + const int n_dims = sz.dims(); + std::vector result; + result.reserve(n_dims); + + // cv::MatSize is not iterable... 
+ for (int i = 0; i < n_dims; i++) { + result.emplace_back(static_cast(sz[i])); + } + return result; + } + + static void softmax(std::vector &rdata) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const size_t last_dim = 2; + for (size_t i = 0 ; i < rdata.size(); i+=last_dim) { + float m = std::max(rdata[i], rdata[i+1]); + rdata[i] = std::exp(rdata[i] - m); + rdata[i + 1] = std::exp(rdata[i + 1] - m); + float s = rdata[i] + rdata[i + 1]; + rdata[i] /= s; + rdata[i + 1] /= s; + } + } + + static std::vector transpose4d(const std::vector &data, + const std::vector &shape, + const std::vector &axes) { + // NOTE: Taken from the OMZ text detection sample almost as-is + if (shape.size() != axes.size()) + throw std::runtime_error("Shape and axes must have the same dimension."); + + for (size_t a : axes) { + if (a >= shape.size()) + throw std::runtime_error("Axis must be less than dimension of shape."); + } + size_t total_size = shape[0]*shape[1]*shape[2]*shape[3]; + std::vector steps { + shape[axes[1]]*shape[axes[2]]*shape[axes[3]], + shape[axes[2]]*shape[axes[3]], + shape[axes[3]], + 1 + }; + + size_t source_data_idx = 0; + std::vector new_data(total_size, 0); + std::vector ids(shape.size()); + for (ids[0] = 0; ids[0] < shape[0]; ids[0]++) { + for (ids[1] = 0; ids[1] < shape[1]; ids[1]++) { + for (ids[2] = 0; ids[2] < shape[2]; ids[2]++) { + for (ids[3]= 0; ids[3] < shape[3]; ids[3]++) { + size_t new_data_idx = ids[axes[0]]*steps[0] + ids[axes[1]]*steps[1] + + ids[axes[2]]*steps[2] + ids[axes[3]]*steps[3]; + new_data[new_data_idx] = data[source_data_idx++]; + } + } + } + } + return new_data; + } + + static std::vector sliceAndGetSecondChannel(const std::vector &data) { + // NOTE: Taken from the OMZ text detection sample almost as-is + std::vector new_data(data.size() / 2, 0); + for (size_t i = 0; i < data.size() / 2; i++) { + new_data[i] = data[2 * i + 1]; + } + return new_data; + } + + static void join(const int p1, + const int p2, + 
std::unordered_map &group_mask) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const int root1 = findRoot(p1, group_mask); + const int root2 = findRoot(p2, group_mask); + if (root1 != root2) { + group_mask[root1] = root2; + } + } + + static cv::Mat decodeImageByJoin(const std::vector &cls_data, + const std::vector &cls_data_shape, + const std::vector &link_data, + const std::vector &link_data_shape, + float cls_conf_threshold, + float link_conf_threshold) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const int h = cls_data_shape[1]; + const int w = cls_data_shape[2]; + + std::vector pixel_mask(h * w, 0); + std::unordered_map group_mask; + std::vector points; + for (int i = 0; i < static_cast(pixel_mask.size()); i++) { + pixel_mask[i] = cls_data[i] >= cls_conf_threshold; + if (pixel_mask[i]) { + points.emplace_back(i % w, i / w); + group_mask[i] = -1; + } + } + std::vector link_mask(link_data.size(), 0); + for (size_t i = 0; i < link_mask.size(); i++) { + link_mask[i] = link_data[i] >= link_conf_threshold; + } + size_t neighbours = size_t(link_data_shape[3]); + for (const auto &point : points) { + size_t neighbour = 0; + for (int ny = point.y - 1; ny <= point.y + 1; ny++) { + for (int nx = point.x - 1; nx <= point.x + 1; nx++) { + if (nx == point.x && ny == point.y) + continue; + if (nx >= 0 && nx < w && ny >= 0 && ny < h) { + uchar pixel_value = pixel_mask[size_t(ny) * size_t(w) + size_t(nx)]; + uchar link_value = link_mask[(size_t(point.y) * size_t(w) + size_t(point.x)) + *neighbours + neighbour]; + if (pixel_value && link_value) { + join(point.x + point.y * w, nx + ny * w, group_mask); + } + } + neighbour++; + } + } + } + return get_all(points, w, h, group_mask); + } + + static cv::Mat get_all(const std::vector &points, + const int w, + const int h, + std::unordered_map &group_mask) { + // NOTE: Taken from the OMZ text detection sample almost as-is + std::unordered_map root_map; + cv::Mat mask(h, w, CV_32S, 
cv::Scalar(0)); + for (const auto &point : points) { + int point_root = findRoot(point.x + point.y * w, group_mask); + if (root_map.find(point_root) == root_map.end()) { + root_map.emplace(point_root, static_cast(root_map.size() + 1)); + } + mask.at(point.x + point.y * w) = root_map[point_root]; + } + return mask; + } + + static int findRoot(const int point, + std::unordered_map &group_mask) { + // NOTE: Taken from the OMZ text detection sample almost as-is + int root = point; + bool update_parent = false; + while (group_mask.at(root) != -1) { + root = group_mask.at(root); + update_parent = true; + } + if (update_parent) { + group_mask[point] = root; + } + return root; + } + + static std::vector maskToBoxes(const cv::Mat &mask, + const float min_area, + const float min_height, + const cv::Size &image_size) { + // NOTE: Taken from the OMZ text detection sample almost as-is + std::vector bboxes; + double min_val = 0.; + double max_val = 0.; + cv::minMaxLoc(mask, &min_val, &max_val); + int max_bbox_idx = static_cast(max_val); + cv::Mat resized_mask; + cv::resize(mask, resized_mask, image_size, 0, 0, cv::INTER_NEAREST); + + for (int i = 1; i <= max_bbox_idx; i++) { + cv::Mat bbox_mask = resized_mask == i; + std::vector> contours; + + cv::findContours(bbox_mask, contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE); + if (contours.empty()) + continue; + cv::RotatedRect r = cv::minAreaRect(contours[0]); + if (std::min(r.size.width, r.size.height) < min_height) + continue; + if (r.size.area() < min_area) + continue; + bboxes.emplace_back(r); + } + return bboxes; + } +}; // GAPI_OCV_KERNEL(PostProcess) + +GAPI_OCV_KERNEL(OCVCropLabels, CropLabels) { + static void run(const cv::Mat &image, + const std::vector &detections, + const cv::Size &outSize, + std::vector &out) { + out.clear(); + out.reserve(detections.size()); + cv::Mat crop(outSize, CV_8UC3, cv::Scalar(0)); + cv::Mat gray(outSize, CV_8UC1, cv::Scalar(0)); + std::vector blob_shape = {1,1,outSize.height,outSize.width}; 
+ + for (auto &&rr : detections) { + std::vector points(4); + rr.points(points.data()); + + const auto top_left_point_idx = topLeftPointIdx(points); + cv::Point2f point0 = points[static_cast(top_left_point_idx)]; + cv::Point2f point1 = points[(top_left_point_idx + 1) % 4]; + cv::Point2f point2 = points[(top_left_point_idx + 2) % 4]; + + std::vector from{point0, point1, point2}; + std::vector to{ + cv::Point2f(0.0f, 0.0f), + cv::Point2f(static_cast(outSize.width-1), 0.0f), + cv::Point2f(static_cast(outSize.width-1), + static_cast(outSize.height-1)) + }; + cv::Mat M = cv::getAffineTransform(from, to); + cv::warpAffine(image, crop, M, outSize); + cv::cvtColor(crop, gray, cv::COLOR_BGR2GRAY); + + cv::Mat blob; + gray.convertTo(blob, CV_32F); + out.push_back(blob.reshape(1, blob_shape)); // pass as 1,1,H,W instead of H,W + } + } + + static int topLeftPointIdx(const std::vector &points) { + // NOTE: Taken from the OMZ text detection sample almost as-is + cv::Point2f most_left(std::numeric_limits::max(), + std::numeric_limits::max()); + cv::Point2f almost_most_left(std::numeric_limits::max(), + std::numeric_limits::max()); + int most_left_idx = -1; + int almost_most_left_idx = -1; + + for (size_t i = 0; i < points.size() ; i++) { + if (most_left.x > points[i].x) { + if (most_left.x < std::numeric_limits::max()) { + almost_most_left = most_left; + almost_most_left_idx = most_left_idx; + } + most_left = points[i]; + most_left_idx = static_cast(i); + } + if (almost_most_left.x > points[i].x && points[i] != most_left) { + almost_most_left = points[i]; + almost_most_left_idx = static_cast(i); + } + } + + if (almost_most_left.y < most_left.y) { + most_left = almost_most_left; + most_left_idx = almost_most_left_idx; + } + return most_left_idx; + } + +}; // GAPI_OCV_KERNEL(CropLabels) + +} // anonymous namespace +} // namespace custom + +namespace vis { +namespace { + +void drawRotatedRect(cv::Mat &m, const cv::RotatedRect &rc) { + std::vector tmp_points(5); + 
rc.points(tmp_points.data()); + tmp_points[4] = tmp_points[0]; + auto prev = tmp_points.begin(), it = prev+1; + for (; it != tmp_points.end(); ++it) { + cv::line(m, *prev, *it, cv::Scalar(50, 205, 50), 2); + prev = it; + } +} + +void drawText(cv::Mat &m, const cv::RotatedRect &rc, const std::string &str) { + const int fface = cv::FONT_HERSHEY_SIMPLEX; + const double scale = 0.7; + const int thick = 1; + int base = 0; + const auto text_size = cv::getTextSize(str, fface, scale, thick, &base); + + std::vector tmp_points(4); + rc.points(tmp_points.data()); + const auto tl_point_idx = custom::OCVCropLabels::topLeftPointIdx(tmp_points); + cv::Point text_pos = tmp_points[tl_point_idx]; + text_pos.x = std::max(0, text_pos.x); + text_pos.y = std::max(text_size.height, text_pos.y); + + cv::rectangle(m, + text_pos + cv::Point{0, base}, + text_pos + cv::Point{text_size.width, -text_size.height}, + CV_RGB(50, 205, 50), + cv::FILLED); + const auto white = CV_RGB(255, 255, 255); + cv::putText(m, str, text_pos, fface, scale, white, thick, 8); +} + +} // anonymous namespace +} // namespace vis + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + cmd.about(about); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + const auto input_file_name = cmd.get("input"); + const auto tdet_model_path = cmd.get("tdm"); + const auto trec_model_path = cmd.get("trm"); + const auto tdet_target_dev = cmd.get("tdd"); + const auto trec_target_dev = cmd.get("trd"); + const auto ctc_beam_dec_bw = cmd.get("bw"); + const auto dec_conf_thresh = cmd.get("thr"); + + const auto pad_symbol = '#'; + const auto symbol_set = cmd.get("sset") + pad_symbol; + + cv::GMat in; + cv::GOpaque in_rec_sz; + cv::GMat link, segm; + std::tie(link, segm) = cv::gapi::infer(in); + cv::GOpaque size = cv::gapi::streaming::size(in); + cv::GArray rrs = custom::PostProcess::on(link, segm, size, 0.8f, 0.8f); + cv::GArray labels = custom::CropLabels::on(in, rrs, in_rec_sz); + cv::GArray 
text = cv::gapi::infer2(in, labels); + + cv::GComputation graph(cv::GIn(in, in_rec_sz), + cv::GOut(cv::gapi::copy(in), rrs, text)); + + // Text detection network + auto tdet_net = cv::gapi::ie::Params { + tdet_model_path, // path to topology IR + weights_path(tdet_model_path), // path to weights + tdet_target_dev, // device specifier + }.cfgOutputLayers({"model/link_logits_/add", "model/segm_logits/add"}); + + auto trec_net = cv::gapi::ie::Params { + trec_model_path, // path to topology IR + weights_path(trec_model_path), // path to weights + trec_target_dev, // device specifier + }; + auto networks = cv::gapi::networks(tdet_net, trec_net); + + auto kernels = cv::gapi::kernels< custom::OCVPostProcess + , custom::OCVCropLabels + >(); + auto pipeline = graph.compileStreaming(cv::compile_args(kernels, networks)); + + std::cout << "Reading " << input_file_name << std::endl; + + // Input stream + auto in_src = cv::gapi::wip::make_src(input_file_name); + + // Text recognition input size (also an input parameter to the graph) + auto in_rsz = cv::Size{ 120, 32 }; + + // Set the pipeline source & start the pipeline + pipeline.setSource(cv::gin(in_src, in_rsz)); + pipeline.start(); + + // Declare the output data & run the processing loop + cv::TickMeter tm; + cv::Mat image; + std::vector out_rcs; + std::vector out_text; + + tm.start(); + int frames = 0; + while (pipeline.pull(cv::gout(image, out_rcs, out_text))) { + frames++; + + CV_Assert(out_rcs.size() == out_text.size()); + const auto num_labels = out_rcs.size(); + + std::vector tmp_points(4); + for (std::size_t l = 0; l < num_labels; l++) { + // Decode the recognized text in the rectangle + const auto &blob = out_text[l]; + const float *data = blob.ptr(); + const auto sz = blob.total(); + double conf = 1.0; + const std::string res = ctc_beam_dec_bw == 0 + ? 
CTCGreedyDecoder(data, sz, symbol_set, pad_symbol, &conf) + : CTCBeamSearchDecoder(data, sz, symbol_set, &conf, ctc_beam_dec_bw); + + // Draw a bounding box for this rotated rectangle + const auto &rc = out_rcs[l]; + vis::drawRotatedRect(image, rc); + + // Draw text, if decoded + if (conf >= dec_conf_thresh) { + vis::drawText(image, rc, res); + } + } + tm.stop(); + cv::imshow("Out", image); + cv::waitKey(1); + tm.start(); + } + tm.stop(); + std::cout << "Processed " << frames << " frames" + << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl; + return 0; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend.cpp index cc0b1a912ba..1e7b8a2a8df 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend.cpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. // -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #include "precomp.hpp" @@ -67,6 +67,21 @@ cv::gapi::GKernelPackage cv::gapi::GBackend::Priv::auxiliaryKernels() const return {}; } +bool cv::gapi::GBackend::Priv::controlsMerge() const +{ + return false; +} + +bool cv::gapi::GBackend::Priv::allowsMerge(const cv::gimpl::GIslandModel::Graph &, + const ade::NodeHandle &, + const ade::NodeHandle &, + const ade::NodeHandle &) const +{ + GAPI_Assert(controlsMerge()); + return true; +} + + // GBackend public implementation ////////////////////////////////////////////// cv::gapi::GBackend::GBackend() { @@ -103,38 +118,42 @@ namespace cv { namespace gimpl { namespace magazine { -// FIXME implement the below functions with visit()? 
+namespace { +// Utility function, used in both bindInArg and bindOutArg, +// implements default RMat bind behaviour (if backend doesn't handle RMats in specific way): +// view + wrapped cv::Mat are placed into the magazine +void bindRMat(Mag& mag, const RcDesc& rc, const cv::RMat& rmat, RMat::Access a) +{ + auto& matv = mag.template slot()[rc.id]; + matv = rmat.access(a); + mag.template slot()[rc.id] = asMat(matv); +} +} // anonymous namespace -void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat) +// FIXME implement the below functions with visit()? +void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handleRMat) { switch (rc.shape) { case GShape::GMAT: { - switch (arg.index()) - { - case GRunArg::index_of() : - if (is_umat) - { + // In case of handleRMat == SKIP + // We assume that backend can work with some device-specific RMats + // and will handle them in some specific way, so just return + if (handleRMat == HandleRMat::SKIP) return; + GAPI_Assert(arg.index() == GRunArg::index_of()); + bindRMat(mag, rc, util::get(arg), RMat::Access::R); + + // FIXME: Here meta may^WWILL be copied multiple times! + // Replace it is reference-counted object? 
+ mag.meta()[rc.id] = arg.meta; + mag.meta()[rc.id] = arg.meta; #if !defined(GAPI_STANDALONE) - auto& mag_umat = mag.template slot()[rc.id]; - mag_umat = util::get(arg).getUMat(ACCESS_READ); -#else - util::throw_error(std::logic_error("UMat is not supported in standalone build")); -#endif // !defined(GAPI_STANDALONE) - } - else - { - auto& mag_mat = mag.template slot()[rc.id]; - mag_mat = util::get(arg); - } - break; - default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); - } + mag.meta()[rc.id] = arg.meta; +#endif break; } - case GShape::GSCALAR: { auto& mag_scalar = mag.template slot()[rc.id]; @@ -143,15 +162,23 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat) case GRunArg::index_of() : mag_scalar = util::get(arg); break; default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); } + mag.meta()[rc.id] = arg.meta; break; } case GShape::GARRAY: - mag.template slot()[rc.id] = util::get(arg); + mag.slot()[rc.id] = util::get(arg); + mag.meta()[rc.id] = arg.meta; break; case GShape::GOPAQUE: - mag.template slot()[rc.id] = util::get(arg); + mag.slot()[rc.id] = util::get(arg); + mag.meta()[rc.id] = arg.meta; + break; + + case GShape::GFRAME: + mag.slot()[rc.id] = util::get(arg); + mag.meta()[rc.id] = arg.meta; break; default: @@ -159,32 +186,18 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat) } } -void bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, bool is_umat) +void bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, HandleRMat handleRMat) { switch (rc.shape) { case GShape::GMAT: { - switch (arg.index()) - { - case GRunArgP::index_of() : - if (is_umat) - { -#if !defined(GAPI_STANDALONE) - auto& mag_umat = mag.template slot()[rc.id]; - mag_umat = util::get(arg)->getUMat(ACCESS_RW); -#else - util::throw_error(std::logic_error("UMat is not supported in 
standalone build")); -#endif // !defined(GAPI_STANDALONE) - } - else - { - auto& mag_mat = mag.template slot()[rc.id]; - mag_mat = *util::get(arg); - } - break; - default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); - } + // In case of handleRMat == SKIP + // We assume that backend can work with some device-specific RMats + // and will handle them in some specific way, so just return + if (handleRMat == HandleRMat::SKIP) return; + GAPI_Assert(arg.index() == GRunArgP::index_of()); + bindRMat(mag, rc, *util::get(arg), RMat::Access::W); break; } @@ -198,6 +211,9 @@ void bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, bool is_umat) } break; } + case GShape::GFRAME: + mag.template slot()[rc.id] = *util::get(arg); + break; case GShape::GARRAY: mag.template slot()[rc.id] = util::get(arg); break; @@ -234,6 +250,7 @@ void resetInternalData(Mag& mag, const Data &d) break; case GShape::GMAT: + case GShape::GFRAME: // Do nothing here - FIXME unify with initInternalData? 
break; @@ -248,12 +265,23 @@ cv::GRunArg getArg(const Mag& mag, const RcDesc &ref) // Wrap associated CPU object (either host or an internal one) switch (ref.shape) { - case GShape::GMAT: return GRunArg(mag.template slot().at(ref.id)); - case GShape::GSCALAR: return GRunArg(mag.template slot().at(ref.id)); + case GShape::GMAT: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); + case GShape::GSCALAR: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); // Note: .at() is intentional for GArray and GOpaque as objects MUST be already there // (and constructed by either bindIn/Out or resetInternal) - case GShape::GARRAY: return GRunArg(mag.template slot().at(ref.id)); - case GShape::GOPAQUE: return GRunArg(mag.template slot().at(ref.id)); + case GShape::GARRAY: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); + case GShape::GOPAQUE: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); + case GShape::GFRAME: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); default: util::throw_error(std::logic_error("Unsupported GShape type")); break; @@ -294,52 +322,24 @@ cv::GRunArgP getObjPtr(Mag& mag, const RcDesc &rc, bool is_umat) // debugging this!!!1 return GRunArgP(const_cast(mag) .template slot().at(rc.id)); + case GShape::GFRAME: + return GRunArgP(&mag.template slot()[rc.id]); + default: util::throw_error(std::logic_error("Unsupported GShape type")); break; } } -void writeBack(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg, bool is_umat) +void writeBack(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg) { switch (rc.shape) { case GShape::GARRAY: - // Do nothing - should we really do anything here? - break; - case GShape::GOPAQUE: - // Do nothing - should we really do anything here? - break; - case GShape::GMAT: - { - //simply check that memory was not reallocated, i.e. 
- //both instances of Mat pointing to the same memory - uchar* out_arg_data = nullptr; - switch (g_arg.index()) - { - case GRunArgP::index_of() : out_arg_data = util::get(g_arg)->data; break; -#if !defined(GAPI_STANDALONE) - case GRunArgP::index_of() : out_arg_data = (util::get(g_arg))->getMat(ACCESS_RW).data; break; -#endif // !defined(GAPI_STANDALONE) - default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); - } - if (is_umat) - { -#if !defined(GAPI_STANDALONE) - auto& in_mag = mag.template slot().at(rc.id); - GAPI_Assert((out_arg_data == (in_mag.getMat(ACCESS_RW).data)) && " data for output parameters was reallocated ?"); -#else - util::throw_error(std::logic_error("UMat is not supported in standalone build")); -#endif // !defined(GAPI_STANDALONE) - } - else - { - auto& in_mag = mag.template slot().at(rc.id); - GAPI_Assert((out_arg_data == in_mag.data) && " data for output parameters was reallocated ?"); - } + case GShape::GOPAQUE: + // Do nothing - should we really do anything here? break; - } case GShape::GSCALAR: { @@ -351,12 +351,50 @@ void writeBack(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg, bool is_umat) break; } + case GShape::GFRAME: + { + *util::get(g_arg) = mag.template slot().at(rc.id); + break; + } + default: util::throw_error(std::logic_error("Unsupported GShape type")); break; } } +void unbind(Mag& mag, const RcDesc &rc) +{ + switch (rc.shape) + { + case GShape::GARRAY: + case GShape::GOPAQUE: + case GShape::GSCALAR: + // TODO: Do nothing - should we really do anything here? 
+ break; + + case GShape::GMAT: + // Clean-up everything - a cv::Mat, cv::RMat::View, a cv::UMat, and cv::RMat + // if applicable + mag.slot().erase(rc.id); +#if !defined(GAPI_STANDALONE) + mag.slot().erase(rc.id); +#endif + mag.slot().erase(rc.id); + mag.slot().erase(rc.id); + break; + + case GShape::GFRAME: + // MediaFrame can also be associated with external memory, + // so requires a special handling here. + mag.slot().erase(rc.id); + break; + + default: + GAPI_Assert(false); + } +} + } // namespace magazine void createMat(const cv::GMatDesc &desc, cv::Mat& mat) diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend_priv.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend_priv.hpp index 13f39acc86c..45237514a53 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend_priv.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gbackend_priv.hpp @@ -19,7 +19,7 @@ #include "opencv2/gapi/gkernel.hpp" #include "compiler/gmodel.hpp" - +#include "compiler/gislandmodel.hpp" namespace cv { @@ -68,6 +68,22 @@ public: virtual cv::gapi::GKernelPackage auxiliaryKernels() const; + // Ask backend if it has a custom control over island fusion process + // This method is quite redundant but there's nothing better fits + // the current fusion process. By default, [existing] backends don't + // control the merge. + // FIXME: Refactor to a single entity? + virtual bool controlsMerge() const; + + // Ask backend if it is ok to merge these two islands connected + // via a data slot. By default, [existing] backends allow to merge everything. + // FIXME: Refactor to a single entity? + // FIXME: Strip down the type details form graph? (make it ade::Graph?) 
+ virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &g, + const ade::NodeHandle &a_nh, + const ade::NodeHandle &slot_nh, + const ade::NodeHandle &b_nh) const; + virtual ~Priv() = default; }; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall.cpp index 6f5f65bbfd4..618107f3463 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall.cpp @@ -69,6 +69,11 @@ cv::detail::GOpaqueU cv::GCall::yieldOpaque(int output) return cv::detail::GOpaqueU(m_priv->m_node, output); } +cv::GFrame cv::GCall::yieldFrame(int output) +{ + return cv::GFrame(m_priv->m_node, output); +} + cv::GCall::Priv& cv::GCall::priv() { return *m_priv; @@ -78,3 +83,13 @@ const cv::GCall::Priv& cv::GCall::priv() const { return *m_priv; } + +cv::GKernel& cv::GCall::kernel() +{ + return m_priv->m_k; +} + +cv::util::any& cv::GCall::params() +{ + return m_priv->m_params; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall_priv.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall_priv.hpp index edc2c225dc1..b142432c789 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall_priv.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcall_priv.hpp @@ -42,10 +42,11 @@ class GCall::Priv { public: std::vector m_args; - const GKernel m_k; + GKernel m_k; // TODO: Rename to "constructionNode" or smt to reflect its lifetime GNode m_node; + cv::util::any m_params; explicit Priv(const GKernel &k); }; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation.cpp index 60119f717ac..5668cddc93c 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation.cpp @@ -9,6 +9,7 @@ 
#include // remove_if #include // isspace (non-locale version) #include +#include // util::indexed #include "logger.hpp" // GAPI_LOG @@ -21,6 +22,7 @@ #include "compiler/gmodelbuilder.hpp" #include "compiler/gcompiler.hpp" +#include "compiler/gcompiled_priv.hpp" // cv::GComputation private implementation ///////////////////////////////////// // @@ -73,18 +75,18 @@ cv::GComputation::GComputation(cv::GProtoInputArgs &&ins, }; } -cv::GComputation::GComputation(cv::gimpl::s11n::I::IStream &is) +cv::GComputation::GComputation(cv::gapi::s11n::IIStream &is) : m_priv(new Priv()) { - m_priv->m_shape = gimpl::s11n::deserialize(is); + m_priv->m_shape = gapi::s11n::deserialize(is); } -void cv::GComputation::serialize(cv::gimpl::s11n::I::OStream &os) const +void cv::GComputation::serialize(cv::gapi::s11n::IOStream &os) const { // Build a basic GModel and write the whole thing to the stream auto pG = cv::gimpl::GCompiler::makeGraph(*m_priv); std::vector nhs(pG->nodes().begin(), pG->nodes().end()); - gimpl::s11n::serialize(os, *pG, nhs); + gapi::s11n::serialize(os, *pG, nhs); } @@ -129,15 +131,14 @@ static bool formats_are_same(const cv::GMetaArgs& metas1, const cv::GMetaArgs& m }); } -void cv::GComputation::apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args) +void cv::GComputation::recompile(GMetaArgs&& in_metas, GCompileArgs &&args) { - const auto in_metas = descr_of(ins); // FIXME Graph should be recompiled when GCompileArgs have changed if (m_priv->m_lastMetas != in_metas) { if (m_priv->m_lastCompiled && - m_priv->m_lastCompiled.canReshape() && - formats_are_same(m_priv->m_lastMetas, in_metas)) + m_priv->m_lastCompiled.canReshape() && + formats_are_same(m_priv->m_lastMetas, in_metas)) { m_priv->m_lastCompiled.reshape(in_metas, args); } @@ -148,6 +149,11 @@ void cv::GComputation::apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&ar } m_priv->m_lastMetas = in_metas; } +} + +void cv::GComputation::apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args) +{ + 
recompile(descr_of(ins), std::move(args)); m_priv->m_lastCompiled(std::move(ins), std::move(outs)); } @@ -165,6 +171,55 @@ void cv::GComputation::apply(const std::vector &ins, apply(std::move(call_ins), std::move(call_outs), std::move(args)); } +// NB: This overload is called from python code +cv::GRunArgs cv::GComputation::apply(GRunArgs &&ins, GCompileArgs &&args) +{ + recompile(descr_of(ins), std::move(args)); + + const auto& out_info = m_priv->m_lastCompiled.priv().outInfo(); + + GRunArgs run_args; + GRunArgsP outs; + run_args.reserve(out_info.size()); + outs.reserve(out_info.size()); + + for (auto&& info : out_info) + { + switch (info.shape) + { + case cv::GShape::GMAT: + { + run_args.emplace_back(cv::Mat{}); + outs.emplace_back(&cv::util::get(run_args.back())); + break; + } + case cv::GShape::GSCALAR: + { + run_args.emplace_back(cv::Scalar{}); + outs.emplace_back(&cv::util::get(run_args.back())); + break; + } + case cv::GShape::GARRAY: + { + switch (info.kind) + { + case cv::detail::OpaqueKind::CV_POINT2F: + run_args.emplace_back(cv::detail::VectorRef{std::vector{}}); + outs.emplace_back(cv::util::get(run_args.back())); + break; + default: + util::throw_error(std::logic_error("Unsupported kind for GArray")); + } + break; + } + default: + util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output")); + } + } + m_priv->m_lastCompiled(std::move(ins), std::move(outs)); + return run_args; +} + #if !defined(GAPI_STANDALONE) void cv::GComputation::apply(cv::Mat in, cv::Mat &out, GCompileArgs &&args) { diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation_priv.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation_priv.hpp index c3160b4b2ea..19d89fdcbd1 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation_priv.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gcomputation_priv.hpp @@ -29,7 +29,7 @@ public: cv::GProtoArgs m_outs; }; - using 
Dump = cv::gimpl::s11n::GSerialized; + using Dump = cv::gapi::s11n::GSerialized; using Shape = cv::util::variant < Expr // An expression-based graph diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gframe.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gframe.cpp index 405924bb0d4..1acaa9b7663 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gframe.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gframe.cpp @@ -8,21 +8,17 @@ #include "precomp.hpp" #include +#include #include "api/gorigin.hpp" // cv::GFrame public implementation ////////////////////////////////////////////// cv::GFrame::GFrame() - : m_priv(new GOrigin(GShape::GMAT, GNode::Param())) { - // N.B.: The shape here is still GMAT as currently cv::Mat is used - // as an underlying host type. Will be changed to GFRAME once - // GExecutor & GStreamingExecutor & selected backends will be extended - // to support cv::MediaFrame. + : m_priv(new GOrigin(GShape::GFRAME, GNode::Param())) { } cv::GFrame::GFrame(const GNode &n, std::size_t out) - : m_priv(new GOrigin(GShape::GMAT, n, out)) { - // N.B.: GMAT is here for the same reason as above ^ + : m_priv(new GOrigin(GShape::GFRAME, n, out)) { } cv::GOrigin& cv::GFrame::priv() { @@ -34,7 +30,23 @@ const cv::GOrigin& cv::GFrame::priv() const { } namespace cv { -std::ostream& operator<<(std::ostream& os, const cv::GFrameDesc &) { + +bool GFrameDesc::operator== (const GFrameDesc &rhs) const { + return fmt == rhs.fmt && size == rhs.size; +} + +GFrameDesc descr_of(const cv::MediaFrame &frame) { + return frame.desc(); +} + +std::ostream& operator<<(std::ostream& os, const cv::GFrameDesc &d) { + os << '['; + switch (d.fmt) { + case MediaFormat::BGR: os << "BGR"; break; + case MediaFormat::NV12: os << "NV12"; break; + default: GAPI_Assert(false && "Invalid media format"); + } + os << ' ' << d.size << ']'; return os; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/ginfer.cpp 
b/inference-engine/thirdparty/fluid/modules/gapi/src/api/ginfer.cpp index 98eeef5ab68..f4bd1c3abb4 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/ginfer.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/ginfer.cpp @@ -16,8 +16,8 @@ #include -cv::gapi::GNetPackage::GNetPackage(std::initializer_list &&ii) - : networks(std::move(ii)) { +cv::gapi::GNetPackage::GNetPackage(std::initializer_list ii) + : networks(ii) { } std::vector cv::gapi::GNetPackage::backends() const { @@ -25,3 +25,59 @@ std::vector cv::gapi::GNetPackage::backends() const { for (const auto &nn : networks) unique_set.insert(nn.backend); return std::vector(unique_set.begin(), unique_set.end()); } + +// FIXME: Inference API is currently only available in full mode +#if !defined(GAPI_STANDALONE) + +cv::GInferInputs::GInferInputs() + : in_blobs(std::make_shared()) +{ +} + +cv::GMat& cv::GInferInputs::operator[](const std::string& name) { + return (*in_blobs)[name]; +} + +const cv::GInferInputs::Map& cv::GInferInputs::getBlobs() const { + return *in_blobs; +} + +void cv::GInferInputs::setInput(const std::string& name, const cv::GMat& value) { + in_blobs->emplace(name, value); +} + +struct cv::GInferOutputs::Priv +{ + Priv(std::shared_ptr); + + std::shared_ptr call; + InOutInfo* info = nullptr; + std::unordered_map out_blobs; +}; + +cv::GInferOutputs::Priv::Priv(std::shared_ptr c) + : call(std::move(c)), info(cv::util::any_cast(&call->params())) +{ +} + +cv::GInferOutputs::GInferOutputs(std::shared_ptr call) + : m_priv(std::make_shared(std::move(call))) +{ +} + +cv::GMat cv::GInferOutputs::at(const std::string& name) +{ + auto it = m_priv->out_blobs.find(name); + if (it == m_priv->out_blobs.end()) { + // FIXME: Avoid modifying GKernel + // Expect output to be always GMat + m_priv->call->kernel().outShapes.push_back(cv::GShape::GMAT); + // ...so _empty_ constructor is passed here. 
+ m_priv->call->kernel().outCtors.emplace_back(cv::util::monostate{}); + int out_idx = static_cast(m_priv->out_blobs.size()); + it = m_priv->out_blobs.emplace(name, m_priv->call->yield(out_idx)).first; + m_priv->info->out_names.push_back(name); + } + return it->second; +} +#endif // GAPI_STANDALONE diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gmat.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gmat.cpp index de33c39b0d0..47a246c293d 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gmat.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gmat.cpp @@ -36,6 +36,38 @@ const cv::GOrigin& cv::GMat::priv() const return *m_priv; } +static std::vector checkVectorImpl(const int width, const int height, const int chan, + const int n) +{ + if (width == 1 && (n == -1 || n == chan)) + { + return {height, chan}; + } + else if (height == 1 && (n == -1 || n == chan)) + { + return {width, chan}; + } + else if (chan == 1 && (n == -1 || n == width)) + { + return {height, width}; + } + else // input Mat can't be described as vector of points of given dimensionality + { + return {-1, -1}; + } +} + +int cv::gapi::detail::checkVector(const cv::GMatDesc& in, const size_t n) +{ + GAPI_Assert(n != 0u); + return checkVectorImpl(in.size.width, in.size.height, in.chan, static_cast(n))[0]; +} + +std::vector cv::gapi::detail::checkVector(const cv::GMatDesc& in) +{ + return checkVectorImpl(in.size.width, in.size.height, in.chan, -1); +} + namespace{ template cv::GMetaArgs vec_descr_of(const std::vector &vec) { @@ -95,6 +127,11 @@ cv::GMetaArgs cv::gapi::own::descrs_of(const std::vector &vec) return vec_descr_of(vec); } +cv::GMatDesc cv::descr_of(const cv::RMat &mat) +{ + return mat.desc(); +} + namespace cv { std::ostream& operator<<(std::ostream& os, const cv::GMatDesc &desc) { @@ -137,4 +174,9 @@ bool GMatDesc::canDescribe(const cv::Mat& mat) const return canDescribeHelper(*this, mat); } +bool GMatDesc::canDescribe(const 
cv::RMat& mat) const +{ + return canDescribeHelper(*this, mat); +} + }// namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gproto.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gproto.cpp index ef5162a58c0..0c7c6462eea 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/gproto.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/gproto.cpp @@ -119,6 +119,12 @@ cv::GMetaArg cv::descr_of(const cv::GRunArg &arg) case GRunArg::index_of(): return cv::util::get(arg)->descr_of(); + case GRunArg::index_of(): + return cv::GMetaArg(cv::util::get(arg).desc()); + + case GRunArg::index_of(): + return cv::GMetaArg(cv::util::get(arg).desc()); + default: util::throw_error(std::logic_error("Unsupported GRunArg type")); } } @@ -130,6 +136,7 @@ cv::GMetaArgs cv::descr_of(const cv::GRunArgs &args) return metas; } +// FIXME: Is it tested for all types? cv::GMetaArg cv::descr_of(const cv::GRunArgP &argp) { switch (argp.index()) @@ -139,12 +146,14 @@ cv::GMetaArg cv::descr_of(const cv::GRunArgP &argp) #endif // !defined(GAPI_STANDALONE) case GRunArgP::index_of(): return GMetaArg(cv::descr_of(*util::get(argp))); case GRunArgP::index_of(): return GMetaArg(descr_of(*util::get(argp))); + case GRunArgP::index_of(): return GMetaArg(descr_of(*util::get(argp))); case GRunArgP::index_of(): return GMetaArg(util::get(argp).descr_of()); case GRunArgP::index_of(): return GMetaArg(util::get(argp).descr_of()); default: util::throw_error(std::logic_error("Unsupported GRunArgP type")); } } +// FIXME: Is it tested for all types?? 
bool cv::can_describe(const GMetaArg& meta, const GRunArgP& argp) { switch (argp.index()) @@ -155,12 +164,14 @@ bool cv::can_describe(const GMetaArg& meta, const GRunArgP& argp) case GRunArgP::index_of(): return util::holds_alternative(meta) && util::get(meta).canDescribe(*util::get(argp)); case GRunArgP::index_of(): return meta == GMetaArg(cv::descr_of(*util::get(argp))); + case GRunArgP::index_of(): return meta == GMetaArg(cv::descr_of(*util::get(argp))); case GRunArgP::index_of(): return meta == GMetaArg(util::get(argp).descr_of()); case GRunArgP::index_of(): return meta == GMetaArg(util::get(argp).descr_of()); default: util::throw_error(std::logic_error("Unsupported GRunArgP type")); } } +// FIXME: Is it tested for all types?? bool cv::can_describe(const GMetaArg& meta, const GRunArg& arg) { switch (arg.index()) @@ -174,6 +185,9 @@ bool cv::can_describe(const GMetaArg& meta, const GRunArg& arg) case GRunArg::index_of(): return meta == cv::GMetaArg(util::get(arg).descr_of()); case GRunArg::index_of(): return meta == cv::GMetaArg(util::get(arg).descr_of()); case GRunArg::index_of(): return util::holds_alternative(meta); // FIXME(?) may be not the best option + case GRunArg::index_of(): return util::holds_alternative(meta) && + util::get(meta).canDescribe(cv::util::get(arg)); + case GRunArg::index_of(): return meta == cv::GMetaArg(util::get(arg).desc()); default: util::throw_error(std::logic_error("Unsupported GRunArg type")); } } @@ -187,6 +201,8 @@ bool cv::can_describe(const GMetaArgs &metas, const GRunArgs &args) }); } +// FIXME: Is it tested for all types? +// FIXME: Where does this validation happen?? 
void cv::validate_input_arg(const GRunArg& arg) { // FIXME: It checks only Mat argument @@ -243,6 +259,11 @@ std::ostream& operator<<(std::ostream& os, const cv::GMetaArg &arg) case cv::GMetaArg::index_of(): os << util::get(arg); break; + + case cv::GMetaArg::index_of(): + os << util::get(arg); + break; + default: GAPI_Assert(false); } @@ -263,10 +284,14 @@ const void* cv::gimpl::proto::ptr(const GRunArgP &arg) return static_cast(cv::util::get(arg)); case GRunArgP::index_of(): return static_cast(cv::util::get(arg)); + case GRunArgP::index_of(): + return static_cast(cv::util::get(arg)); case GRunArgP::index_of(): return cv::util::get(arg).ptr(); case GRunArgP::index_of(): return cv::util::get(arg).ptr(); + case GRunArgP::index_of(): + return static_cast(cv::util::get(arg)); default: util::throw_error(std::logic_error("Unknown GRunArgP type!")); } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/grunarg.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/grunarg.cpp new file mode 100644 index 00000000000..30ae2adbc09 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/grunarg.cpp @@ -0,0 +1,33 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" +#include + +cv::GRunArg::GRunArg() { +} + +cv::GRunArg::GRunArg(const cv::GRunArg &arg) + : cv::GRunArgBase(static_cast(arg)) + , meta(arg.meta) { +} + +cv::GRunArg::GRunArg(cv::GRunArg &&arg) + : cv::GRunArgBase(std::move(static_cast(arg))) + , meta(std::move(arg.meta)) { +} + +cv::GRunArg& cv::GRunArg::operator= (const cv::GRunArg &arg) { + cv::GRunArgBase::operator=(static_cast(arg)); + meta = arg.meta; + return *this; +} + +cv::GRunArg& cv::GRunArg::operator= (cv::GRunArg &&arg) { + cv::GRunArgBase::operator=(std::move(static_cast(arg))); + meta = std::move(arg.meta); + return *this; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_core.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_core.cpp index 961d19cdaaf..15af915bdd0 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_core.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_core.cpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #include "precomp.hpp" @@ -234,6 +234,11 @@ GScalar sum(const GMat& src) return core::GSum::on(src); } +GOpaque countNonZero(const GMat& src) +{ + return core::GCountNonZero::on(src); +} + GMat addWeighted(const GMat& src1, double alpha, const GMat& src2, double beta, double gamma, int dtype) { return core::GAddW::on(src1, alpha, src2, beta, gamma, dtype); @@ -383,14 +388,48 @@ GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, int flags, return core::GWarpAffine::on(src, M, dsize, flags, borderMode, borderValue); } -GOpaque size(const GMat& src) +std::tuple,GMat,GMat> kmeans(const GMat& data, const int K, const GMat& bestLabels, + const TermCriteria& criteria, const int attempts, + const KmeansFlags flags) { - return core::GSize::on(src); + return core::GKMeansND::on(data, K, bestLabels, criteria, attempts, flags); } -GOpaque size(const GOpaque& r) +std::tuple,GMat,GMat> kmeans(const GMat& data, const int K, + const TermCriteria& criteria, const int attempts, + const KmeansFlags flags) { - return core::GSizeR::on(r); + return core::GKMeansNDNoInit::on(data, K, criteria, attempts, flags); +} + +std::tuple,GArray,GArray> kmeans(const GArray& data, + const int K, + const GArray& bestLabels, + const TermCriteria& criteria, + const int attempts, + const KmeansFlags flags) +{ + return core::GKMeans2D::on(data, K, bestLabels, criteria, attempts, flags); +} + +std::tuple,GArray,GArray> kmeans(const GArray& data, + const int K, + const GArray& bestLabels, + const TermCriteria& criteria, + const int attempts, + const KmeansFlags flags) +{ + return core::GKMeans3D::on(data, K, bestLabels, criteria, attempts, flags); +} + +GOpaque streaming::size(const GMat& src) +{ + return streaming::GSize::on(src); +} + +GOpaque streaming::size(const GOpaque& r) +{ + return streaming::GSizeR::on(r); } } //namespace gapi diff --git 
a/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_imgproc.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_imgproc.cpp index 108eefcb816..41085a7ebf3 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_imgproc.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_imgproc.cpp @@ -73,6 +73,13 @@ GMat dilate3x3(const GMat& src, int iterations, return dilate(src, cv::Mat(), cv::Point(-1,-1), iterations, borderType, borderValue); } +GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel, const Point &anchor, + const int iterations, const BorderTypes borderType, const Scalar &borderValue) +{ + return imgproc::GMorphologyEx::on(src, op, kernel, anchor, iterations, + borderType, borderValue); +} + GMat Sobel(const GMat& src, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType, const Scalar& bordVal) @@ -115,6 +122,101 @@ cv::GArray goodFeaturesToTrack(const GMat& image, int maxCorners, d useHarrisDetector, k); } +GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset) +{ + return imgproc::GFindContours::on(src, mode, method, offset); +} + +GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method) +{ + return imgproc::GFindContoursNoOffset::on(src, mode, method); +} + + +std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset) +{ + return imgproc::GFindContoursH::on(src, mode, method, offset); +} + +std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method) +{ + return imgproc::GFindContoursHNoOffset::on(src, mode, method); +} + +GOpaque boundingRect(const GMat& src) +{ + return imgproc::GBoundingRectMat::on(src); +} + +GOpaque boundingRect(const GArray& src) +{ + return 
imgproc::GBoundingRectVector32S::on(src); +} + +GOpaque boundingRect(const GArray& src) +{ + return imgproc::GBoundingRectVector32F::on(src); +} + +GOpaque fitLine2D(const GMat& src, const DistanceTypes distType, const double param, + const double reps, const double aeps) +{ + return imgproc::GFitLine2DMat::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine2DVector32S::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine2DVector32F::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine2DVector64F::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GMat& src, const DistanceTypes distType, const double param, + const double reps, const double aeps) +{ + return imgproc::GFitLine3DMat::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine3DVector32S::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine3DVector32F::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine3DVector64F::on(src, distType, param, reps, aeps); +} + +GMat BGR2RGB(const GMat& src) +{ + return imgproc::GBGR2RGB::on(src); +} + GMat RGB2Gray(const GMat& src) { return imgproc::GRGB2Gray::on(src); @@ -160,6 +262,26 
@@ GMat YUV2RGB(const GMat& src) return imgproc::GYUV2RGB::on(src); } +GMat BGR2I420(const GMat& src) +{ + return imgproc::GBGR2I420::on(src); +} + +GMat RGB2I420(const GMat& src) +{ + return imgproc::GRGB2I420::on(src); +} + +GMat I4202BGR(const GMat& src) +{ + return imgproc::GI4202BGR::on(src); +} + +GMat I4202RGB(const GMat& src) +{ + return imgproc::GI4202RGB::on(src); +} + GMat NV12toRGB(const GMat& src_y, const GMat& src_uv) { return imgproc::GNV12toRGB::on(src_y, src_uv); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_streaming.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_streaming.cpp new file mode 100644 index 00000000000..66bf27260d9 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_streaming.cpp @@ -0,0 +1,84 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" + +#include +#include + +#include + +cv::GMat cv::gapi::streaming::desync(const cv::GMat &g) { + // FIXME: this is a limited implementation of desync + // The real implementation must be generic (template) and + // reside in desync.hpp (and it is detail::desync<>()) + + // FIXME: Put a copy here to solve the below problem + // FIXME: Because of the copy, the desync functionality is limited + // to GMat only (we don't have generic copy kernel for other + // object types) + return cv::gapi::copy(detail::desync(g)); + + // FIXME + // + // If consumed by multiple different islands (OCV and Fluid by + // example, an object needs to be desynchronized individually + // for every path. + // + // This is a limitation of the current implementation. 
It works + // this way: every "desync" link from the main path to a new + // desync path gets its "DesyncQueue" object which stores only the + // last value written before of the desync object (DO) it consumes + // (the container of type "last written value" or LWV. + // + // LWV + // [Sync path] -> desync() - - > DO -> [ISL0 @ Desync path #1] + // + // At the same time, generally, every island in the streaming + // graph gets its individual input as a queue (so normally, a + // writer pushes the same output MULTIPLE TIMES if it has mutliple + // readers): + // + // LWV + // [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1] + // : LWV + // ' - - > DO2 -> [ISL1 @ Desync path #1] + // + // For users, it may seem legit to use desync here only once, and + // it MUST BE legit once the problem is fixed. + // But the problem with the current implementation is that islands + // on the same desync path get different desync queues and in fact + // stay desynchronized between each other. One shouldn't consider + // this as a single desync path anymore. + // If these two ISLs are then merged e.g. with add(a,b), the + // results will be inconsistent, given that the latency of ISL0 + // and ISL1 may be different. This is not the same frame anymore + // coming as `a` and `b` to add(a,b) because of it. + // + // To make things clear, we forbid this now and ask to call + // desync one more time to allow that. It is bad since the graph + // structure and island layout depends on kernel packages used, + // not on the sole GComputation structure. This needs to be fixed! + // Here's the working configuration: + // + // LWV + // [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1] + // : LWV + // '-> desync() - - > DO2 -> [ISL1 @ Desync path #2] <-(!) + // + // Put an operation right after desync() is a quick workaround to + // this synchronization problem. 
There will be one "last_written_value" + // connected to a desynchronized data object, and this sole last_written_value + // object will feed both branches of the streaming executable. +} + +cv::GFrame cv::gapi::streaming::copy(const cv::GFrame& in) { + return cv::gapi::streaming::GCopy::on(in); +} + +cv::GMat cv::gapi::streaming::BGR(const cv::GFrame& in) { + return cv::gapi::streaming::GBGR::on(in); +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_video.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_video.cpp index eff6d488748..5eeaef22340 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_video.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/kernels_video.cpp @@ -52,5 +52,68 @@ GOptFlowLKOutput calcOpticalFlowPyrLK(const cv::GArray &prevPyr, criteria, flags, minEigThresh); } +GMat BackgroundSubtractor(const GMat& src, const BackgroundSubtractorParams& bsp) +{ + return GBackgroundSubtractor::on(src, bsp); +} + +GMat KalmanFilter(const GMat& m, const cv::GOpaque& have_m, const GMat& c, const KalmanParams& kp) +{ + return GKalmanFilter::on(m, have_m, c, kp); +} + +GMat KalmanFilter(const GMat& m, const cv::GOpaque& have_m, const KalmanParams& kp) +{ + return GKalmanFilterNoControl::on(m, have_m, kp); +} + +namespace video { +void checkParams(const cv::gapi::KalmanParams& kfParams, + const cv::GMatDesc& measurement, const cv::GMatDesc& control) +{ + int type = kfParams.transitionMatrix.type(); + GAPI_Assert(type == CV_32FC1 || type == CV_64FC1); + int depth = CV_MAT_DEPTH(type); + + bool controlCapable = !(control == GMatDesc{}); + + if (controlCapable) + { + GAPI_Assert(!kfParams.controlMatrix.empty()); + GAPI_Assert(control.depth == depth && control.chan == 1 && + control.size.height == kfParams.controlMatrix.cols && + control.size.width == 1); + } + else + GAPI_Assert(kfParams.controlMatrix.empty()); + + GAPI_Assert(!kfParams.state.empty() && kfParams.state.type() == 
type); + GAPI_Assert(!kfParams.errorCov.empty() && kfParams.errorCov.type() == type); + GAPI_Assert(!kfParams.transitionMatrix.empty() && kfParams.transitionMatrix.type() == type); + GAPI_Assert(!kfParams.processNoiseCov.empty() && kfParams.processNoiseCov.type() == type); + GAPI_Assert(!kfParams.measurementNoiseCov.empty() && kfParams.measurementNoiseCov.type() == type); + GAPI_Assert(!kfParams.measurementMatrix.empty() && kfParams.measurementMatrix.type() == type); + GAPI_Assert(measurement.depth == depth && measurement.chan == 1); + + int dDim = kfParams.transitionMatrix.cols; + GAPI_Assert(kfParams.transitionMatrix.rows == dDim); + + GAPI_Assert(kfParams.processNoiseCov.cols == dDim && + kfParams.processNoiseCov.rows == dDim); + GAPI_Assert(kfParams.errorCov.cols == dDim && kfParams.errorCov.rows == dDim); + GAPI_Assert(kfParams.state.rows == dDim && kfParams.state.cols == 1); + GAPI_Assert(kfParams.measurementMatrix.cols == dDim); + + int mDim = kfParams.measurementMatrix.rows; + GAPI_Assert(kfParams.measurementNoiseCov.cols == mDim && + kfParams.measurementNoiseCov.rows == mDim); + + if (controlCapable) + GAPI_Assert(kfParams.controlMatrix.rows == dDim); + + GAPI_Assert(measurement.size.height == mDim && + measurement.size.width == 1); +} +} // namespace video } //namespace gapi } //namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/media.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/media.cpp new file mode 100644 index 00000000000..212902ee3b9 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/media.cpp @@ -0,0 +1,42 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" +#include + +struct cv::MediaFrame::Priv { + std::unique_ptr adapter; +}; + +cv::MediaFrame::MediaFrame() { +} + +cv::MediaFrame::MediaFrame(AdapterPtr &&ptr) + : m(new Priv{std::move(ptr)}) { +} + +cv::GFrameDesc cv::MediaFrame::desc() const { + return m->adapter->meta(); +} + +cv::MediaFrame::View cv::MediaFrame::access(Access code) const { + return m->adapter->access(code); +} + +cv::MediaFrame::View::View(Ptrs&& ptrs, Strides&& strs, Callback &&cb) + : ptr (std::move(ptrs)) + , stride(std::move(strs)) + , m_cb (std::move(cb)) { +} + +cv::MediaFrame::View::~View() { + if (m_cb) { + m_cb(); + } +} + +cv::MediaFrame::IAdapter::~IAdapter() { +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.cpp index a298a958bd6..5ab2e1dd07c 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.cpp @@ -2,7 +2,7 @@ #include // Kernel API's #include "api/render_ocv.hpp" -#include "api/ft_render.hpp" +#include "backends/render/ft_render.hpp" namespace cv { @@ -146,12 +146,8 @@ struct EmptyConverter template void drawPrimitivesOCV(cv::Mat& in, const cv::gapi::wip::draw::Prims& prims, - cv::gapi::wip::draw::FTTextRender* ftpr) + std::shared_ptr& ftpr) { -#ifndef HAVE_FREETYPE - cv::util::suppress_unused_warning(ftpr); -#endif - using namespace cv::gapi::wip::draw; ColorConverter converter; @@ -177,7 +173,6 @@ void drawPrimitivesOCV(cv::Mat& in, case Prim::index_of(): { -#ifdef HAVE_FREETYPE const auto& ftp = cv::util::get(p); const auto color = converter.cvtColor(ftp.color); @@ -196,9 +191,6 @@ void drawPrimitivesOCV(cv::Mat& in, cv::Point tl(ftp.org.x, ftp.org.y - mask.size().height + baseline); blendTextMask(in, mask, tl, color); -#else - cv::util::throw_error(std::runtime_error("FreeType not found !")); -#endif 
break; } @@ -251,16 +243,16 @@ void drawPrimitivesOCV(cv::Mat& in, } } -void drawPrimitivesOCVBGR(cv::Mat &in, - const cv::gapi::wip::draw::Prims &prims, - cv::gapi::wip::draw::FTTextRender* ftpr) +void drawPrimitivesOCVBGR(cv::Mat &in, + const cv::gapi::wip::draw::Prims &prims, + std::shared_ptr &ftpr) { drawPrimitivesOCV(in, prims, ftpr); } -void drawPrimitivesOCVYUV(cv::Mat &in, - const cv::gapi::wip::draw::Prims &prims, - cv::gapi::wip::draw::FTTextRender* ftpr) +void drawPrimitivesOCVYUV(cv::Mat &in, + const cv::gapi::wip::draw::Prims &prims, + std::shared_ptr &ftpr) { drawPrimitivesOCV(in, prims, ftpr); } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.hpp index 91194dcdc1a..a9a98f93fb0 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/render_ocv.hpp @@ -1,6 +1,6 @@ #include #include "render_priv.hpp" -#include "ft_render.hpp" +#include "backends/render/ft_render.hpp" #ifndef OPENCV_RENDER_OCV_HPP #define OPENCV_RENDER_OCV_HPP @@ -15,8 +15,8 @@ namespace draw { // FIXME only for tests -void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc); -void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc); +void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, std::shared_ptr& mc); +void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, std::shared_ptr& mc); } // namespace draw } // namespace wip diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/rmat.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/rmat.cpp new file mode 100644 index 00000000000..12ba4e5e0e3 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/rmat.cpp @@ -0,0 +1,75 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include + +using View = cv::RMat::View; + +namespace { +cv::GMatDesc checkDesc(const cv::GMatDesc& desc) { + if (!desc.dims.empty() && desc.chan != -1) { + cv::util::throw_error( + std::logic_error("Multidimesional RMat::Views with chan different from -1 are not supported!")); + } + return desc; +} + +int typeFromDesc(const cv::GMatDesc& desc) { + // In multidimensional case GMatDesc::chan is -1, + // change it to 1 when calling CV_MAKE_TYPE + return CV_MAKE_TYPE(desc.depth, desc.chan == -1 ? 1 : desc.chan); +} + +static View::stepsT defaultSteps(const cv::GMatDesc& desc) { + const auto& dims = desc.dims.empty() + ? std::vector{desc.size.height, desc.size.width} + : desc.dims; + View::stepsT steps(dims.size(), 0u); + auto type = typeFromDesc(desc); + steps.back() = CV_ELEM_SIZE(type); + for (int i = static_cast(dims.size())-2; i >= 0; i--) { + steps[i] = steps[i+1]*dims[i]; + } + return steps; +} +} // anonymous namespace + +View::View(const cv::GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb) + : m_desc(checkDesc(desc)) + , m_data(data) + , m_steps([this, step](){ + GAPI_Assert(m_desc.dims.empty()); + auto steps = defaultSteps(m_desc); + if (step != 0u) { + steps[0] = step; + } + return steps; + }()) + , m_cb(std::move(cb)) { +} + +View::View(const cv::GMatDesc& desc, uchar* data, const stepsT &steps, DestroyCallback&& cb) + : m_desc(checkDesc(desc)) + , m_data(data) + , m_steps(steps == stepsT{} ? 
defaultSteps(m_desc): steps) + , m_cb(std::move(cb)) { +} + +int View::type() const { return typeFromDesc(m_desc); } + +// There is an issue with default generated operator=(View&&) on Mac: +// it doesn't nullify m_cb of the moved object +View& View::operator=(View&& v) { + m_desc = v.m_desc; + m_data = v.m_data; + m_steps = v.m_steps; + m_cb = v.m_cb; + v.m_desc = {}; + v.m_data = nullptr; + v.m_steps = {0u}; + v.m_cb = nullptr; + return *this; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/api/s11n.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/api/s11n.cpp index b56c34fbdd1..b6acf28ea4f 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/api/s11n.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/api/s11n.cpp @@ -10,40 +10,47 @@ #include "backends/common/serialization.hpp" std::vector cv::gapi::serialize(const cv::GComputation &c) { - cv::gimpl::s11n::ByteMemoryOutStream os; + cv::gapi::s11n::ByteMemoryOutStream os; c.serialize(os); return os.data(); } cv::GComputation cv::gapi::detail::getGraph(const std::vector &p) { - cv::gimpl::s11n::ByteMemoryInStream is(p); + cv::gapi::s11n::ByteMemoryInStream is(p); return cv::GComputation(is); } cv::GMetaArgs cv::gapi::detail::getMetaArgs(const std::vector &p) { - cv::gimpl::s11n::ByteMemoryInStream is(p); + cv::gapi::s11n::ByteMemoryInStream is(p); return meta_args_deserialize(is); } cv::GRunArgs cv::gapi::detail::getRunArgs(const std::vector &p) { - cv::gimpl::s11n::ByteMemoryInStream is(p); + cv::gapi::s11n::ByteMemoryInStream is(p); return run_args_deserialize(is); } std::vector cv::gapi::serialize(const cv::GMetaArgs& ma) { - cv::gimpl::s11n::ByteMemoryOutStream os; + cv::gapi::s11n::ByteMemoryOutStream os; serialize(os, ma); return os.data(); } std::vector cv::gapi::serialize(const cv::GRunArgs& ra) { - cv::gimpl::s11n::ByteMemoryOutStream os; + cv::gapi::s11n::ByteMemoryOutStream os; serialize(os, ra); return os.data(); } +std::vector cv::gapi::serialize(const 
cv::GCompileArgs& ca) +{ + cv::gapi::s11n::ByteMemoryOutStream os; + serialize(os, ca); + return os.data(); +} + // FIXME: This function should move from S11N to GRunArg-related entities. // it has nothing to do with the S11N as it is cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results) @@ -72,6 +79,9 @@ cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results) case T::index_of() : outputs.emplace_back(cv::util::get(res_obj)); break; + case cv::GRunArg::index_of() : + outputs.emplace_back((cv::RMat*)(&(cv::util::get(res_obj)))); + break; default: GAPI_Assert(false && "This value type is not supported!"); // ...maybe because of STANDALONE mode. break; @@ -105,6 +115,9 @@ cv::GRunArg cv::gapi::bind(cv::GRunArgP &out) case T::index_of() : return cv::GRunArg(*cv::util::get(out)); + case T::index_of() : + return cv::GRunArg(*cv::util::get(out)); + default: // ...maybe our types were extended GAPI_Assert(false && "This value type is UNKNOWN!"); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gbackend.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gbackend.hpp index e9a44c44f8d..576168db533 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gbackend.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gbackend.hpp @@ -22,6 +22,38 @@ namespace cv { namespace gimpl { + inline cv::Mat asMat(RMat::View& v) { +#if !defined(GAPI_STANDALONE) + return v.dims().empty() ? cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step()) + : cv::Mat(v.dims(), v.type(), v.ptr(), v.steps().data()); +#else + // FIXME: add a check that steps are default + return v.dims().empty() ? 
cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step()) + : cv::Mat(v.dims(), v.type(), v.ptr()); + +#endif + } + inline RMat::View asView(const Mat& m, RMat::View::DestroyCallback&& cb = nullptr) { +#if !defined(GAPI_STANDALONE) + RMat::View::stepsT steps(m.dims); + for (int i = 0; i < m.dims; i++) { + steps[i] = m.step[i]; + } + return RMat::View(cv::descr_of(m), m.data, steps, std::move(cb)); +#else + return RMat::View(cv::descr_of(m), m.data, m.step, std::move(cb)); +#endif + } + + class RMatAdapter : public RMat::Adapter { + cv::Mat m_mat; + public: + const void* data() const { return m_mat.data; } + RMatAdapter(cv::Mat m) : m_mat(m) {} + virtual RMat::View access(RMat::Access) override { return asView(m_mat); } + virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); } + }; + // Forward declarations struct Data; struct RcDesc; @@ -30,6 +62,8 @@ namespace magazine { template struct Class { template using MapT = std::unordered_map; + using MapM = std::unordered_map; + template MapT& slot() { return std::get::value>(slots); @@ -38,26 +72,61 @@ namespace magazine { { return std::get::value>(slots); } + template MapM& meta() + { + return metas[ade::util::type_list_index::value]; + } + template const MapM& meta() const + { + return metas[ade::util::type_list_index::value]; + } private: std::tuple...> slots; + std::array metas; }; } // namespace magazine + +using Mag = magazine::Class< cv::Mat + , cv::Scalar + , cv::detail::VectorRef + , cv::detail::OpaqueRef + , cv::RMat + , cv::RMat::View + , cv::MediaFrame #if !defined(GAPI_STANDALONE) -using Mag = magazine::Class; -#else -using Mag = magazine::Class; + , cv::UMat #endif + >; namespace magazine { - void GAPI_EXPORTS bindInArg (Mag& mag, const RcDesc &rc, const GRunArg &arg, bool is_umat = false); - void GAPI_EXPORTS bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, bool is_umat = false); + enum class HandleRMat { BIND, SKIP }; + // Extracts a memory object from GRunArg, stores it in 
appropriate slot in a magazine + // Note: + // Only RMats are expected here as a memory object for GMat shape. + // If handleRMat is BIND, RMat will be accessed, and RMat::View and wrapping cv::Mat + // will be placed into the magazine. + // If handleRMat is SKIP, this function skips'RMat handling assuming that backend will do it on its own. + // FIXME? + // handleRMat parameter might be redundant if all device specific backends implement own bind routines + // without utilizing magazine at all + void GAPI_EXPORTS bindInArg (Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handleRMat = HandleRMat::BIND); + + // Extracts a memory object reference fro GRunArgP, stores it in appropriate slot in a magazine + // Note on RMat handling from bindInArg above is also applied here + void GAPI_EXPORTS bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, HandleRMat handleRMat = HandleRMat::BIND); void resetInternalData(Mag& mag, const Data &d); cv::GRunArg getArg (const Mag& mag, const RcDesc &ref); cv::GRunArgP getObjPtr ( Mag& mag, const RcDesc &rc, bool is_umat = false); - void writeBack (const Mag& mag, const RcDesc &rc, GRunArgP &g_arg, bool is_umat = false); + void writeBack (const Mag& mag, const RcDesc &rc, GRunArgP &g_arg); + + // A mandatory clean-up procedure to force proper lifetime of wrappers (cv::Mat, cv::RMat::View) + // over not-owned data + // FIXME? Add an RAII wrapper for that? + // Or put objects which need to be cleaned-up into a separate stack allocated magazine? 
+ void unbind(Mag &mag, const RcDesc &rc); } // namespace magazine namespace detail @@ -90,7 +159,7 @@ inline cv::util::optional getCompileArg(const cv::GCompileArgs &args) return cv::gapi::getCompileArg(args); } -void createMat(const cv::GMatDesc& desc, cv::Mat& mat); +void GAPI_EXPORTS createMat(const cv::GMatDesc& desc, cv::Mat& mat); }} // cv::gimpl diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gmetabackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gmetabackend.cpp new file mode 100644 index 00000000000..5364152b654 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gmetabackend.cpp @@ -0,0 +1,105 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" + +#include // compile args +#include // any +#include // GMeta + +#include "compiler/gobjref.hpp" // RcDesc +#include "compiler/gmodel.hpp" // GModel, Op +#include "backends/common/gbackend.hpp" +#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! 
+ +#include "backends/common/gmetabackend.hpp" + +namespace { + +class GraphMetaExecutable final: public cv::gimpl::GIslandExecutable { + std::string m_meta_tag; + +public: + GraphMetaExecutable(const ade::Graph& g, + const std::vector& nodes); + bool canReshape() const override; + void reshape(ade::Graph&, const cv::GCompileArgs&) override; + + void run(std::vector &&input_objs, + std::vector &&output_objs) override; +}; + +bool GraphMetaExecutable::canReshape() const { + return true; +} +void GraphMetaExecutable::reshape(ade::Graph&, const cv::GCompileArgs&) { + // do nothing here +} + +GraphMetaExecutable::GraphMetaExecutable(const ade::Graph& g, + const std::vector& nodes) { + // There may be only one node in the graph + GAPI_Assert(nodes.size() == 1u); + + cv::gimpl::GModel::ConstGraph cg(g); + const auto &op = cg.metadata(nodes[0]).get(); + GAPI_Assert(op.k.name == cv::gapi::streaming::detail::GMeta::id()); + m_meta_tag = op.k.tag; +} + +void GraphMetaExecutable::run(std::vector &&input_objs, + std::vector &&output_objs) { + GAPI_Assert(input_objs.size() == 1u); + GAPI_Assert(output_objs.size() == 1u); + + const cv::GRunArg in_arg = input_objs[0].second; + cv::GRunArgP out_arg = output_objs[0].second; + + auto it = in_arg.meta.find(m_meta_tag); + if (it == in_arg.meta.end()) { + cv::util::throw_error + (std::logic_error("Run-time meta " + + m_meta_tag + + " is not found in object " + + std::to_string(static_cast(input_objs[0].first.shape)) + + "/" + + std::to_string(input_objs[0].first.id))); + } + cv::util::get(out_arg) = it->second; +} + +class GraphMetaBackendImpl final: public cv::gapi::GBackend::Priv { + virtual void unpackKernel(ade::Graph &, + const ade::NodeHandle &, + const cv::GKernelImpl &) override { + // Do nothing here + } + + virtual EPtr compile(const ade::Graph& graph, + const cv::GCompileArgs&, + const std::vector& nodes, + const std::vector&, + const std::vector&) const override { + return EPtr{new GraphMetaExecutable(graph, nodes)}; + } 
+}; + +cv::gapi::GBackend graph_meta_backend() { + static cv::gapi::GBackend this_backend(std::make_shared()); + return this_backend; +} + +struct InGraphMetaKernel final: public cv::detail::KernelTag { + using API = cv::gapi::streaming::detail::GMeta; + static cv::gapi::GBackend backend() { return graph_meta_backend(); } + static int kernel() { return 42; } +}; + +} // anonymous namespace + +cv::gapi::GKernelPackage cv::gimpl::meta::kernels() { + return cv::gapi::kernels(); +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gmetabackend.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gmetabackend.hpp new file mode 100644 index 00000000000..56f61d0e3df --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/gmetabackend.hpp @@ -0,0 +1,16 @@ +#ifndef OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP +#define OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP + +#include + +namespace cv { +namespace gimpl { +namespace meta { + +cv::gapi::GKernelPackage kernels(); + +} // namespace meta +} // namespace gimpl +} // namespace cv + +#endif // OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.cpp index ab7626d43d4..8c2313b292d 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.cpp @@ -21,11 +21,11 @@ #include "backends/common/serialization.hpp" namespace cv { -namespace gimpl { +namespace gapi { namespace s11n { namespace { -void putData(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle &nh) { +void putData(GSerialized& s, const cv::gimpl::GModel::ConstGraph& cg, const ade::NodeHandle &nh) { const auto gdata = cg.metadata(nh).get(); const auto it = ade::util::find_if(s.m_datas, 
[&gdata](const cv::gimpl::Data &cd) { return cd.rc == gdata.rc && cd.shape == gdata.shape; @@ -35,7 +35,7 @@ void putData(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle } } -void putOp(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle &nh) { +void putOp(GSerialized& s, const cv::gimpl::GModel::ConstGraph& cg, const ade::NodeHandle &nh) { const auto& op = cg.metadata(nh).get(); for (const auto &in_nh : nh->inNodes()) { putData(s, cg, in_nh); } for (const auto &out_nh : nh->outNodes()) { putData(s, cg, out_nh); } @@ -43,25 +43,25 @@ void putOp(GSerialized& s, const GModel::ConstGraph& cg, const ade::NodeHandle & } void mkDataNode(ade::Graph& g, const cv::gimpl::Data& data) { - GModel::Graph gm(g); + cv::gimpl::GModel::Graph gm(g); auto nh = gm.createNode(); - gm.metadata(nh).set(NodeType{NodeType::DATA}); + gm.metadata(nh).set(cv::gimpl::NodeType{cv::gimpl::NodeType::DATA}); gm.metadata(nh).set(data); } void mkOpNode(ade::Graph& g, const cv::gimpl::Op& op) { - GModel::Graph gm(g); + cv::gimpl::GModel::Graph gm(g); auto nh = gm.createNode(); - gm.metadata(nh).set(NodeType{NodeType::OP}); + gm.metadata(nh).set(cv::gimpl::NodeType{cv::gimpl::NodeType::OP}); gm.metadata(nh).set(op); } void linkNodes(ade::Graph& g) { std::map dataNodes; - GModel::Graph gm(g); + cv::gimpl::GModel::Graph gm(g); for (const auto& nh : g.nodes()) { - if (gm.metadata(nh).get().t == NodeType::DATA) { + if (gm.metadata(nh).get().t == cv::gimpl::NodeType::DATA) { const auto &d = gm.metadata(nh).get(); const auto rc = cv::gimpl::RcDesc{d.rc, d.shape, d.ctor}; dataNodes[rc] = nh; @@ -69,7 +69,7 @@ void linkNodes(ade::Graph& g) { } for (const auto& nh : g.nodes()) { - if (gm.metadata(nh).get().t == NodeType::OP) { + if (gm.metadata(nh).get().t == cv::gimpl::NodeType::OP) { const auto& op = gm.metadata(nh).get(); for (const auto& in : ade::util::indexed(op.args)) { const auto& arg = ade::util::value(in); @@ -78,7 +78,7 @@ void linkNodes(ade::Graph& g) { const 
auto rc = arg.get(); const auto& in_nh = dataNodes.at(rc); const auto& in_eh = g.link(in_nh, nh); - gm.metadata(in_eh).set(Input{idx}); + gm.metadata(in_eh).set(cv::gimpl::Input{idx}); } } @@ -87,19 +87,20 @@ void linkNodes(ade::Graph& g) { const auto rc = ade::util::value(out); const auto& out_nh = dataNodes.at(rc); const auto& out_eh = g.link(nh, out_nh); - gm.metadata(out_eh).set(Output{idx}); + gm.metadata(out_eh).set(cv::gimpl::Output{idx}); } } } } void relinkProto(ade::Graph& g) { + using namespace cv::gimpl; // identify which node handles map to the protocol // input/output object in the reconstructed graph - using S = std::set; // FIXME: use ... - using M = std::map; // FIXME: unordered! + using S = std::set; // FIXME: use ... + using M = std::map; // FIXME: unordered! - cv::gimpl::GModel::Graph gm(g); + GModel::Graph gm(g); auto &proto = gm.metadata().get(); const S set_in(proto.inputs.begin(), proto.inputs.end()); @@ -108,9 +109,9 @@ void relinkProto(ade::Graph& g) { // Associate the protocol node handles with their resource identifiers for (auto &&nh : gm.nodes()) { - if (gm.metadata(nh).get().t == cv::gimpl::NodeType::DATA) { - const auto &d = gm.metadata(nh).get(); - const auto rc = cv::gimpl::RcDesc{d.rc, d.shape, d.ctor}; + if (gm.metadata(nh).get().t == NodeType::DATA) { + const auto &d = gm.metadata(nh).get(); + const auto rc = RcDesc{d.rc, d.shape, d.ctor}; if (set_in.count(rc) > 0) { GAPI_DbgAssert(set_out.count(rc) == 0); map_in[rc] = nh; @@ -128,6 +129,12 @@ void relinkProto(ade::Graph& g) { proto.out_nhs.clear(); for (auto &rc : proto.inputs) { proto.in_nhs .push_back(map_in .at(rc)); } for (auto &rc : proto.outputs) { proto.out_nhs.push_back(map_out.at(rc)); } + + // If a subgraph is being serialized it's possible that + // some of its in/out nodes are INTERNAL in the full graph. 
+ // Set their storage apporpriately + for (auto &nh : proto.in_nhs) { gm.metadata(nh).get().storage = Data::Storage::INPUT; } + for (auto &nh : proto.out_nhs) { gm.metadata(nh).get().storage = Data::Storage::OUTPUT; } } } // anonymous namespace @@ -138,76 +145,102 @@ void relinkProto(ade::Graph& g) { // OpenCV types //////////////////////////////////////////////////////////////// -I::OStream& operator<< (I::OStream& os, const cv::Point &pt) { +IOStream& operator<< (IOStream& os, const cv::Point &pt) { return os << pt.x << pt.y; } -I::IStream& operator>> (I::IStream& is, cv::Point& pt) { +IIStream& operator>> (IIStream& is, cv::Point& pt) { return is >> pt.x >> pt.y; } -I::OStream& operator<< (I::OStream& os, const cv::Size &sz) { +IOStream& operator<< (IOStream& os, const cv::Point2f &pt) { + return os << pt.x << pt.y; +} +IIStream& operator>> (IIStream& is, cv::Point2f& pt) { + return is >> pt.x >> pt.y; +} + +IOStream& operator<< (IOStream& os, const cv::Size &sz) { return os << sz.width << sz.height; } -I::IStream& operator>> (I::IStream& is, cv::Size& sz) { +IIStream& operator>> (IIStream& is, cv::Size& sz) { return is >> sz.width >> sz.height; } -I::OStream& operator<< (I::OStream& os, const cv::Rect &rc) { +IOStream& operator<< (IOStream& os, const cv::Rect &rc) { return os << rc.x << rc.y << rc.width << rc.height; } -I::IStream& operator>> (I::IStream& is, cv::Rect& rc) { +IIStream& operator>> (IIStream& is, cv::Rect& rc) { return is >> rc.x >> rc.y >> rc.width >> rc.height; } -I::OStream& operator<< (I::OStream& os, const cv::Scalar &s) { +IOStream& operator<< (IOStream& os, const cv::Scalar &s) { return os << s.val[0] << s.val[1] << s.val[2] << s.val[3]; } -I::IStream& operator>> (I::IStream& is, cv::Scalar& s) { +IIStream& operator>> (IIStream& is, cv::Scalar& s) { return is >> s.val[0] >> s.val[1] >> s.val[2] >> s.val[3]; } +IOStream& operator<< (IOStream& os, const cv::RMat& mat) { + mat.serialize(os); + return os; +} +IIStream& operator>> (IIStream& 
is, cv::RMat&) { + util::throw_error(std::logic_error("operator>> for RMat should never be called")); + return is; +} + +IOStream& operator<< (IOStream& os, const cv::MediaFrame &) { + // Stub + GAPI_Assert(false && "cv::MediaFrame serialization is not supported!"); + return os; +} +IIStream& operator>> (IIStream& is, cv::MediaFrame &) { + // Stub + GAPI_Assert(false && "cv::MediaFrame serialization is not supported!"); + return is; +} namespace { #if !defined(GAPI_STANDALONE) template - void write_plain(I::OStream &os, const T *arr, std::size_t sz) { + void write_plain(IOStream &os, const T *arr, std::size_t sz) { for (auto &&it : ade::util::iota(sz)) os << arr[it]; } template - void read_plain(I::IStream &is, T *arr, std::size_t sz) { + void read_plain(IIStream &is, T *arr, std::size_t sz) { for (auto &&it : ade::util::iota(sz)) is >> arr[it]; } template -void write_mat_data(I::OStream &os, const cv::Mat &m) { +void write_mat_data(IOStream &os, const cv::Mat &m) { // Write every row individually (handles the case when Mat is a view) for (auto &&r : ade::util::iota(m.rows)) { write_plain(os, m.ptr(r), m.cols*m.channels()); } } template -void read_mat_data(I::IStream &is, cv::Mat &m) { +void read_mat_data(IIStream &is, cv::Mat &m) { // Write every row individually (handles the case when Mat is aligned) for (auto &&r : ade::util::iota(m.rows)) { read_plain(is, m.ptr(r), m.cols*m.channels()); } } #else -void write_plain(I::OStream &os, const uchar *arr, std::size_t sz) { +void write_plain(IOStream &os, const uchar *arr, std::size_t sz) { for (auto &&it : ade::util::iota(sz)) os << arr[it]; } -void read_plain(I::IStream &is, uchar *arr, std::size_t sz) { +void read_plain(IIStream &is, uchar *arr, std::size_t sz) { for (auto &&it : ade::util::iota(sz)) is >> arr[it]; } template -void write_mat_data(I::OStream &os, const cv::Mat &m) { +void write_mat_data(IOStream &os, const cv::Mat &m) { // Write every row individually (handles the case when Mat is a view) for (auto 
&&r : ade::util::iota(m.rows)) { write_plain(os, m.ptr(r), m.cols*m.channels()*sizeof(T)); } } template -void read_mat_data(I::IStream &is, cv::Mat &m) { +void read_mat_data(IIStream &is, cv::Mat &m) { // Write every row individually (handles the case when Mat is aligned) for (auto &&r : ade::util::iota(m.rows)) { read_plain(is, m.ptr(r), m.cols*m.channels()*sizeof(T)); @@ -216,7 +249,7 @@ void read_mat_data(I::IStream &is, cv::Mat &m) { #endif } // namespace -I::OStream& operator<< (I::OStream& os, const cv::Mat &m) { +IOStream& operator<< (IOStream& os, const cv::Mat &m) { #if !defined(GAPI_STANDALONE) GAPI_Assert(m.size.dims() == 2 && "Only 2D images are supported now"); #else @@ -235,7 +268,7 @@ I::OStream& operator<< (I::OStream& os, const cv::Mat &m) { } return os; } -I::IStream& operator>> (I::IStream& is, cv::Mat& m) { +IIStream& operator>> (IIStream& is, cv::Mat& m) { int rows = -1, cols = -1, type = 0; is >> rows >> cols >> type; m.create(cv::Size(cols, rows), type); @@ -252,97 +285,109 @@ I::IStream& operator>> (I::IStream& is, cv::Mat& m) { return is; } -I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Text &t) { +IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Text &t) { return os << t.bottom_left_origin << t.color << t.ff << t.fs << t.lt << t.org << t.text << t.thick; } -I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Text &t) { +IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Text &t) { return is >> t.bottom_left_origin >> t.color >> t.ff >> t.fs >> t.lt >> t.org >> t.text >> t.thick; } -I::OStream& operator<< (I::OStream&, const cv::gapi::wip::draw::FText &) { +IOStream& operator<< (IOStream&, const cv::gapi::wip::draw::FText &) { GAPI_Assert(false && "Serialization: Unsupported << for FText"); } -I::IStream& operator>> (I::IStream&, cv::gapi::wip::draw::FText &) { +IIStream& operator>> (IIStream&, cv::gapi::wip::draw::FText &) { GAPI_Assert(false && "Serialization: Unsupported >> for FText"); } 
-I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Circle &c) { +IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Circle &c) { return os << c.center << c.color << c.lt << c.radius << c.shift << c.thick; } -I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Circle &c) { +IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Circle &c) { return is >> c.center >> c.color >> c.lt >> c.radius >> c.shift >> c.thick; } -I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Rect &r) { +IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Rect &r) { return os << r.color << r.lt << r.rect << r.shift << r.thick; } -I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Rect &r) { +IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Rect &r) { return is >> r.color >> r.lt >> r.rect >> r.shift >> r.thick; } -I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Image &i) { +IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Image &i) { return os << i.org << i.alpha << i.img; } -I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Image &i) { +IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Image &i) { return is >> i.org >> i.alpha >> i.img; } -I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Mosaic &m) { +IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Mosaic &m) { return os << m.cellSz << m.decim << m.mos; } -I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Mosaic &m) { +IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Mosaic &m) { return is >> m.cellSz >> m.decim >> m.mos; } -I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Poly &p) { +IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Poly &p) { return os << p.color << p.lt << p.points << p.shift << p.thick; } -I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Poly &p) { +IIStream& operator>> (IIStream& is, 
cv::gapi::wip::draw::Poly &p) { return is >> p.color >> p.lt >> p.points >> p.shift >> p.thick; } -I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Line &l) { +IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Line &l) { return os << l.color << l.lt << l.pt1 << l.pt2 << l.shift << l.thick; } -I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Line &l) { +IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Line &l) { return is >> l.color >> l.lt >> l.pt1 >> l.pt2 >> l.shift >> l.thick; } // G-API types ///////////////////////////////////////////////////////////////// +IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg) +{ + ByteMemoryOutStream tmpS; + arg.serialize(tmpS); + std::vector data = tmpS.data(); + + os << arg.tag; + os << data; + + return os; +} + // Stubs (empty types) -I::OStream& operator<< (I::OStream& os, cv::util::monostate ) {return os;} -I::IStream& operator>> (I::IStream& is, cv::util::monostate &) {return is;} +IOStream& operator<< (IOStream& os, cv::util::monostate ) {return os;} +IIStream& operator>> (IIStream& is, cv::util::monostate &) {return is;} -I::OStream& operator<< (I::OStream& os, const cv::GScalarDesc &) {return os;} -I::IStream& operator>> (I::IStream& is, cv::GScalarDesc &) {return is;} +IOStream& operator<< (IOStream& os, const cv::GScalarDesc &) {return os;} +IIStream& operator>> (IIStream& is, cv::GScalarDesc &) {return is;} -I::OStream& operator<< (I::OStream& os, const cv::GOpaqueDesc &) {return os;} -I::IStream& operator>> (I::IStream& is, cv::GOpaqueDesc &) {return is;} +IOStream& operator<< (IOStream& os, const cv::GOpaqueDesc &) {return os;} +IIStream& operator>> (IIStream& is, cv::GOpaqueDesc &) {return is;} -I::OStream& operator<< (I::OStream& os, const cv::GArrayDesc &) {return os;} -I::IStream& operator>> (I::IStream& is, cv::GArrayDesc &) {return is;} +IOStream& operator<< (IOStream& os, const cv::GArrayDesc &) {return os;} +IIStream& operator>> (IIStream& is, 
cv::GArrayDesc &) {return is;} #if !defined(GAPI_STANDALONE) -I::OStream& operator<< (I::OStream& os, const cv::UMat &) +IOStream& operator<< (IOStream& os, const cv::UMat &) { GAPI_Assert(false && "Serialization: Unsupported << for UMat"); return os; } -I::IStream& operator >> (I::IStream& is, cv::UMat &) +IIStream& operator >> (IIStream& is, cv::UMat &) { GAPI_Assert(false && "Serialization: Unsupported >> for UMat"); return is; } #endif // !defined(GAPI_STANDALONE) -I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::IStreamSource::Ptr &) +IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &) { GAPI_Assert(false && "Serialization: Unsupported << for IStreamSource::Ptr"); return os; } -I::IStream& operator >> (I::IStream& is, cv::gapi::wip::IStreamSource::Ptr &) +IIStream& operator >> (IIStream& is, cv::gapi::wip::IStreamSource::Ptr &) { GAPI_Assert("Serialization: Unsupported >> for IStreamSource::Ptr"); return is; @@ -356,7 +401,7 @@ struct putToStream; template struct putToStream> { - static void put(I::OStream&, const Ref &) + static void put(IOStream&, const Ref &) { GAPI_Assert(false && "Unsupported type for GArray/GOpaque serialization"); } @@ -365,7 +410,7 @@ struct putToStream> template struct putToStream> { - static void put(I::OStream& os, const Ref &r) + static void put(IOStream& os, const Ref &r) { if (r.getKind() == cv::detail::GOpaqueTraits::kind) { os << r.template rref(); @@ -381,7 +426,7 @@ struct getFromStream; template struct getFromStream> { - static void get(I::IStream&, Ref &, cv::detail::OpaqueKind) + static void get(IIStream&, Ref &, cv::detail::OpaqueKind) { GAPI_Assert(false && "Unsupported type for GArray/GOpaque deserialization"); } @@ -390,7 +435,7 @@ struct getFromStream> template struct getFromStream> { - static void get(I::IStream& is, Ref &r, cv::detail::OpaqueKind kind) { + static void get(IIStream& is, Ref &r, cv::detail::OpaqueKind kind) { if (kind == cv::detail::GOpaqueTraits::kind) { 
r.template reset(); auto& val = r.template wref(); @@ -402,13 +447,13 @@ struct getFromStream> }; } -I::OStream& operator<< (I::OStream& os, const cv::detail::VectorRef& ref) +IOStream& operator<< (IOStream& os, const cv::detail::VectorRef& ref) { os << ref.getKind(); putToStream::put(os, ref); return os; } -I::IStream& operator >> (I::IStream& is, cv::detail::VectorRef& ref) +IIStream& operator >> (IIStream& is, cv::detail::VectorRef& ref) { cv::detail::OpaqueKind kind; is >> kind; @@ -416,13 +461,13 @@ I::IStream& operator >> (I::IStream& is, cv::detail::VectorRef& ref) return is; } -I::OStream& operator<< (I::OStream& os, const cv::detail::OpaqueRef& ref) +IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef& ref) { os << ref.getKind(); putToStream::put(os, ref); return os; } -I::IStream& operator >> (I::IStream& is, cv::detail::OpaqueRef& ref) +IIStream& operator >> (IIStream& is, cv::detail::OpaqueRef& ref) { cv::detail::OpaqueKind kind; is >> kind; @@ -432,41 +477,41 @@ I::IStream& operator >> (I::IStream& is, cv::detail::OpaqueRef& ref) // Enums and structures namespace { -template I::OStream& put_enum(I::OStream& os, E e) { +template IOStream& put_enum(IOStream& os, E e) { return os << static_cast(e); } -template I::IStream& get_enum(I::IStream& is, E &e) { +template IIStream& get_enum(IIStream& is, E &e) { int x{}; is >> x; e = static_cast(x); return is; } } // anonymous namespace -I::OStream& operator<< (I::OStream& os, cv::GShape sh) { +IOStream& operator<< (IOStream& os, cv::GShape sh) { return put_enum(os, sh); } -I::IStream& operator>> (I::IStream& is, cv::GShape &sh) { +IIStream& operator>> (IIStream& is, cv::GShape &sh) { return get_enum(is, sh); } -I::OStream& operator<< (I::OStream& os, cv::detail::ArgKind k) { +IOStream& operator<< (IOStream& os, cv::detail::ArgKind k) { return put_enum(os, k); } -I::IStream& operator>> (I::IStream& is, cv::detail::ArgKind &k) { +IIStream& operator>> (IIStream& is, cv::detail::ArgKind &k) { return 
get_enum(is, k); } -I::OStream& operator<< (I::OStream& os, cv::detail::OpaqueKind k) { +IOStream& operator<< (IOStream& os, cv::detail::OpaqueKind k) { return put_enum(os, k); } -I::IStream& operator>> (I::IStream& is, cv::detail::OpaqueKind &k) { +IIStream& operator>> (IIStream& is, cv::detail::OpaqueKind &k) { return get_enum(is, k); } -I::OStream& operator<< (I::OStream& os, cv::gimpl::Data::Storage s) { +IOStream& operator<< (IOStream& os, cv::gimpl::Data::Storage s) { return put_enum(os, s); } -I::IStream& operator>> (I::IStream& is, cv::gimpl::Data::Storage &s) { +IIStream& operator>> (IIStream& is, cv::gimpl::Data::Storage &s) { return get_enum(is, s); } -I::OStream& operator<< (I::OStream& os, const cv::GArg &arg) { +IOStream& operator<< (IOStream& os, const cv::GArg &arg) { // Only GOBJREF and OPAQUE_VAL kinds can be serialized/deserialized GAPI_Assert( arg.kind == cv::detail::ArgKind::OPAQUE_VAL || arg.kind == cv::detail::ArgKind::GOBJREF); @@ -478,21 +523,24 @@ I::OStream& operator<< (I::OStream& os, const cv::GArg &arg) { GAPI_Assert(arg.kind == cv::detail::ArgKind::OPAQUE_VAL); GAPI_Assert(arg.opaque_kind != cv::detail::OpaqueKind::CV_UNKNOWN); switch (arg.opaque_kind) { - case cv::detail::OpaqueKind::CV_BOOL: os << arg.get(); break; - case cv::detail::OpaqueKind::CV_INT: os << arg.get(); break; - case cv::detail::OpaqueKind::CV_DOUBLE: os << arg.get(); break; - case cv::detail::OpaqueKind::CV_POINT: os << arg.get(); break; - case cv::detail::OpaqueKind::CV_SIZE: os << arg.get(); break; - case cv::detail::OpaqueKind::CV_RECT: os << arg.get(); break; - case cv::detail::OpaqueKind::CV_SCALAR: os << arg.get(); break; - case cv::detail::OpaqueKind::CV_MAT: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_BOOL: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_INT: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_UINT64: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_DOUBLE: os << arg.get(); break; + case 
cv::detail::OpaqueKind::CV_FLOAT: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_STRING: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_POINT: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_SIZE: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_RECT: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_SCALAR: os << arg.get(); break; + case cv::detail::OpaqueKind::CV_MAT: os << arg.get(); break; default: GAPI_Assert(false && "GArg: Unsupported (unknown?) opaque value type"); } } return os; } -I::IStream& operator>> (I::IStream& is, cv::GArg &arg) { +IIStream& operator>> (IIStream& is, cv::GArg &arg) { is >> arg.kind >> arg.opaque_kind; // Only GOBJREF and OPAQUE_VAL kinds can be serialized/deserialized @@ -509,14 +557,18 @@ I::IStream& operator>> (I::IStream& is, cv::GArg &arg) { switch (arg.opaque_kind) { #define HANDLE_CASE(E,T) case cv::detail::OpaqueKind::CV_##E: \ { T t{}; is >> t; arg = (cv::GArg(t)); } break - HANDLE_CASE(BOOL , bool); - HANDLE_CASE(INT , int); - HANDLE_CASE(DOUBLE , double); - HANDLE_CASE(POINT , cv::Point); - HANDLE_CASE(SIZE , cv::Size); - HANDLE_CASE(RECT , cv::Rect); - HANDLE_CASE(SCALAR , cv::Scalar); - HANDLE_CASE(MAT , cv::Mat); + HANDLE_CASE(BOOL , bool); + HANDLE_CASE(INT , int); + HANDLE_CASE(UINT64 , uint64_t); + HANDLE_CASE(DOUBLE , double); + HANDLE_CASE(FLOAT , float); + HANDLE_CASE(STRING , std::string); + HANDLE_CASE(POINT , cv::Point); + HANDLE_CASE(POINT2F , cv::Point2f); + HANDLE_CASE(SIZE , cv::Size); + HANDLE_CASE(RECT , cv::Rect); + HANDLE_CASE(SCALAR , cv::Scalar); + HANDLE_CASE(MAT , cv::Mat); #undef HANDLE_CASE default: GAPI_Assert(false && "GArg: Unsupported (unknown?) 
opaque value type"); } @@ -524,43 +576,49 @@ I::IStream& operator>> (I::IStream& is, cv::GArg &arg) { return is; } -I::OStream& operator<< (I::OStream& os, const cv::GKernel &k) { +IOStream& operator<< (IOStream& os, const cv::GKernel &k) { return os << k.name << k.tag << k.outShapes; } -I::IStream& operator>> (I::IStream& is, cv::GKernel &k) { +IIStream& operator>> (IIStream& is, cv::GKernel &k) { return is >> const_cast(k.name) >> const_cast(k.tag) >> const_cast(k.outShapes); } -I::OStream& operator<< (I::OStream& os, const cv::GMatDesc &d) { +IOStream& operator<< (IOStream& os, const cv::GMatDesc &d) { return os << d.depth << d.chan << d.size << d.planar << d.dims; } -I::IStream& operator>> (I::IStream& is, cv::GMatDesc &d) { +IIStream& operator>> (IIStream& is, cv::GMatDesc &d) { return is >> d.depth >> d.chan >> d.size >> d.planar >> d.dims; } +IOStream& operator<< (IOStream& os, const cv::GFrameDesc &d) { + return put_enum(os, d.fmt) << d.size; +} +IIStream& operator>> (IIStream& is, cv::GFrameDesc &d) { + return get_enum(is, d.fmt) >> d.size; +} -I::OStream& operator<< (I::OStream& os, const cv::gimpl::RcDesc &rc) { +IOStream& operator<< (IOStream& os, const cv::gimpl::RcDesc &rc) { // FIXME: HostCtor is not serialized! return os << rc.id << rc.shape; } -I::IStream& operator>> (I::IStream& is, cv::gimpl::RcDesc &rc) { +IIStream& operator>> (IIStream& is, cv::gimpl::RcDesc &rc) { // FIXME: HostCtor is not deserialized! return is >> rc.id >> rc.shape; } -I::OStream& operator<< (I::OStream& os, const cv::gimpl::Op &op) { +IOStream& operator<< (IOStream& os, const cv::gimpl::Op &op) { return os << op.k << op.args << op.outs; } -I::IStream& operator>> (I::IStream& is, cv::gimpl::Op &op) { +IIStream& operator>> (IIStream& is, cv::gimpl::Op &op) { return is >> op.k >> op.args >> op.outs; } -I::OStream& operator<< (I::OStream& os, const cv::gimpl::Data &d) { +IOStream& operator<< (IOStream& os, const cv::gimpl::Data &d) { // FIXME: HostCtor is not stored here!! 
// FIXME: Storage may be incorrect for subgraph-to-graph process return os << d.shape << d.rc << d.meta << d.storage << d.kind; @@ -594,7 +652,7 @@ struct initCtor> }; } // anonymous namespace -I::IStream& operator>> (I::IStream& is, cv::gimpl::Data &d) { +IIStream& operator>> (IIStream& is, cv::gimpl::Data &d) { // FIXME: HostCtor is not stored here!! // FIXME: Storage may be incorrect for subgraph-to-graph process is >> d.shape >> d.rc >> d.meta >> d.storage >> d.kind; @@ -610,42 +668,42 @@ I::IStream& operator>> (I::IStream& is, cv::gimpl::Data &d) { } -I::OStream& operator<< (I::OStream& os, const cv::gimpl::DataObjectCounter &c) { +IOStream& operator<< (IOStream& os, const cv::gimpl::DataObjectCounter &c) { return os << c.m_next_data_id; } -I::IStream& operator>> (I::IStream& is, cv::gimpl::DataObjectCounter &c) { +IIStream& operator>> (IIStream& is, cv::gimpl::DataObjectCounter &c) { return is >> c.m_next_data_id; } -I::OStream& operator<< (I::OStream& os, const cv::gimpl::Protocol &p) { +IOStream& operator<< (IOStream& os, const cv::gimpl::Protocol &p) { // NB: in_nhs/out_nhs are not written! 
return os << p.inputs << p.outputs; } -I::IStream& operator>> (I::IStream& is, cv::gimpl::Protocol &p) { +IIStream& operator>> (IIStream& is, cv::gimpl::Protocol &p) { // NB: in_nhs/out_nhs are reconstructed at a later phase return is >> p.inputs >> p.outputs; } -void serialize( I::OStream& os +void serialize( IOStream& os , const ade::Graph &g , const std::vector &nodes) { cv::gimpl::GModel::ConstGraph cg(g); serialize(os, g, cg.metadata().get(), nodes); } -void serialize( I::OStream& os +void serialize( IOStream& os , const ade::Graph &g , const cv::gimpl::Protocol &p , const std::vector &nodes) { cv::gimpl::GModel::ConstGraph cg(g); GSerialized s; for (auto &nh : nodes) { - switch (cg.metadata(nh).get().t) + switch (cg.metadata(nh).get().t) { - case NodeType::OP: putOp (s, cg, nh); break; - case NodeType::DATA: putData(s, cg, nh); break; + case cv::gimpl::NodeType::OP: putOp (s, cg, nh); break; + case cv::gimpl::NodeType::DATA: putData(s, cg, nh); break; default: util::throw_error(std::logic_error("Unknown NodeType")); } } @@ -654,7 +712,7 @@ void serialize( I::OStream& os os << s.m_ops << s.m_datas << s.m_counter << s.m_proto; } -GSerialized deserialize(I::IStream &is) { +GSerialized deserialize(IIStream &is) { GSerialized s; is >> s.m_ops >> s.m_datas >> s.m_counter >> s.m_proto; return s; @@ -662,14 +720,14 @@ GSerialized deserialize(I::IStream &is) { void reconstruct(const GSerialized &s, ade::Graph &g) { GAPI_Assert(g.nodes().empty()); - for (const auto& d : s.m_datas) cv::gimpl::s11n::mkDataNode(g, d); - for (const auto& op : s.m_ops) cv::gimpl::s11n::mkOpNode(g, op); - cv::gimpl::s11n::linkNodes(g); + for (const auto& d : s.m_datas) cv::gapi::s11n::mkDataNode(g, d); + for (const auto& op : s.m_ops) cv::gapi::s11n::mkOpNode(g, op); + cv::gapi::s11n::linkNodes(g); cv::gimpl::GModel::Graph gm(g); gm.metadata().set(s.m_counter); gm.metadata().set(s.m_proto); - cv::gimpl::s11n::relinkProto(g); + cv::gapi::s11n::relinkProto(g); 
gm.metadata().set(cv::gimpl::Deserialized{}); } @@ -679,48 +737,54 @@ void reconstruct(const GSerialized &s, ade::Graph &g) { const std::vector& ByteMemoryOutStream::data() const { return m_storage; } -I::OStream& ByteMemoryOutStream::operator<< (uint32_t atom) { +IOStream& ByteMemoryOutStream::operator<< (uint32_t atom) { m_storage.push_back(0xFF & (atom)); m_storage.push_back(0xFF & (atom >> 8)); m_storage.push_back(0xFF & (atom >> 16)); m_storage.push_back(0xFF & (atom >> 24)); return *this; } -I::OStream& ByteMemoryOutStream::operator<< (bool atom) { +IOStream& ByteMemoryOutStream::operator<< (uint64_t atom) { + for (int i = 0; i < 8; ++i) { + m_storage.push_back(0xFF & (atom >> (i * 8)));; + } + return *this; +} +IOStream& ByteMemoryOutStream::operator<< (bool atom) { m_storage.push_back(atom ? 1 : 0); return *this; } -I::OStream& ByteMemoryOutStream::operator<< (char atom) { +IOStream& ByteMemoryOutStream::operator<< (char atom) { m_storage.push_back(atom); return *this; } -I::OStream& ByteMemoryOutStream::operator<< (unsigned char atom) { +IOStream& ByteMemoryOutStream::operator<< (unsigned char atom) { return *this << static_cast(atom); } -I::OStream& ByteMemoryOutStream::operator<< (short atom) { +IOStream& ByteMemoryOutStream::operator<< (short atom) { static_assert(sizeof(short) == 2, "Expecting sizeof(short) == 2"); m_storage.push_back(0xFF & (atom)); m_storage.push_back(0xFF & (atom >> 8)); return *this; } -I::OStream& ByteMemoryOutStream::operator<< (unsigned short atom) { +IOStream& ByteMemoryOutStream::operator<< (unsigned short atom) { return *this << static_cast(atom); } -I::OStream& ByteMemoryOutStream::operator<< (int atom) { +IOStream& ByteMemoryOutStream::operator<< (int atom) { static_assert(sizeof(int) == 4, "Expecting sizeof(int) == 4"); return *this << static_cast(atom); } -//I::OStream& ByteMemoryOutStream::operator<< (std::size_t atom) { +//IOStream& ByteMemoryOutStream::operator<< (std::size_t atom) { // // NB: type truncated! 
// return *this << static_cast(atom); //} -I::OStream& ByteMemoryOutStream::operator<< (float atom) { +IOStream& ByteMemoryOutStream::operator<< (float atom) { static_assert(sizeof(float) == 4, "Expecting sizeof(float) == 4"); uint32_t tmp = 0u; memcpy(&tmp, &atom, sizeof(float)); return *this << static_cast(htonl(tmp)); } -I::OStream& ByteMemoryOutStream::operator<< (double atom) { +IOStream& ByteMemoryOutStream::operator<< (double atom) { static_assert(sizeof(double) == 8, "Expecting sizeof(double) == 8"); uint32_t tmp[2] = {0u}; memcpy(tmp, &atom, sizeof(double)); @@ -728,17 +792,16 @@ I::OStream& ByteMemoryOutStream::operator<< (double atom) { *this << static_cast(htonl(tmp[1])); return *this; } -I::OStream& ByteMemoryOutStream::operator<< (const std::string &str) { +IOStream& ByteMemoryOutStream::operator<< (const std::string &str) { //*this << static_cast(str.size()); // N.B. Put type explicitly *this << static_cast(str.size()); // N.B. Put type explicitly for (auto c : str) *this << c; return *this; } - ByteMemoryInStream::ByteMemoryInStream(const std::vector &data) : m_storage(data) { } -I::IStream& ByteMemoryInStream::operator>> (uint32_t &atom) { +IIStream& ByteMemoryInStream::operator>> (uint32_t &atom) { check(sizeof(uint32_t)); uint8_t x[4]; x[0] = static_cast(m_storage[m_idx++]); @@ -748,23 +811,38 @@ I::IStream& ByteMemoryInStream::operator>> (uint32_t &atom) { atom = ((x[0]) | (x[1] << 8) | (x[2] << 16) | (x[3] << 24)); return *this; } -I::IStream& ByteMemoryInStream::operator>> (bool& atom) { +IIStream& ByteMemoryInStream::operator>> (bool& atom) { check(sizeof(char)); atom = (m_storage[m_idx++] == 0) ? false : true; return *this; } -I::IStream& ByteMemoryInStream::operator>> (char &atom) { +IIStream& ByteMemoryInStream::operator>> (std::vector::reference atom) { + check(sizeof(char)); + atom = (m_storage[m_idx++] == 0) ? 
false : true; + return *this; +} +IIStream& ByteMemoryInStream::operator>> (char &atom) { check(sizeof(char)); atom = m_storage[m_idx++]; return *this; } -I::IStream& ByteMemoryInStream::operator>> (unsigned char &atom) { +IIStream& ByteMemoryInStream::operator>> (uint64_t &atom) { + check(sizeof(uint64_t)); + uint8_t x[8]; + atom = 0; + for (int i = 0; i < 8; ++i) { + x[i] = static_cast(m_storage[m_idx++]); + atom |= (static_cast(x[i]) << (i * 8)); + } + return *this; +} +IIStream& ByteMemoryInStream::operator>> (unsigned char &atom) { char c{}; *this >> c; atom = static_cast(c); return *this; } -I::IStream& ByteMemoryInStream::operator>> (short &atom) { +IIStream& ByteMemoryInStream::operator>> (short &atom) { static_assert(sizeof(short) == 2, "Expecting sizeof(short) == 2"); check(sizeof(short)); uint8_t x[2]; @@ -773,35 +851,35 @@ I::IStream& ByteMemoryInStream::operator>> (short &atom) { atom = ((x[0]) | (x[1] << 8)); return *this; } -I::IStream& ByteMemoryInStream::operator>> (unsigned short &atom) { +IIStream& ByteMemoryInStream::operator>> (unsigned short &atom) { short s{}; *this >> s; atom = static_cast(s); return *this; } -I::IStream& ByteMemoryInStream::operator>> (int& atom) { +IIStream& ByteMemoryInStream::operator>> (int& atom) { static_assert(sizeof(int) == 4, "Expecting sizeof(int) == 4"); atom = static_cast(getU32()); return *this; } -//I::IStream& ByteMemoryInStream::operator>> (std::size_t& atom) { +//IIStream& ByteMemoryInStream::operator>> (std::size_t& atom) { // // NB. Type was truncated! 
// atom = static_cast(getU32()); // return *this; //} -I::IStream& ByteMemoryInStream::operator>> (float& atom) { +IIStream& ByteMemoryInStream::operator>> (float& atom) { static_assert(sizeof(float) == 4, "Expecting sizeof(float) == 4"); uint32_t tmp = ntohl(getU32()); memcpy(&atom, &tmp, sizeof(float)); return *this; } -I::IStream& ByteMemoryInStream::operator>> (double& atom) { +IIStream& ByteMemoryInStream::operator>> (double& atom) { static_assert(sizeof(double) == 8, "Expecting sizeof(double) == 8"); uint32_t tmp[2] = {ntohl(getU32()), ntohl(getU32())}; memcpy(&atom, tmp, sizeof(double)); return *this; } -I::IStream& ByteMemoryInStream::operator>> (std::string& str) { +IIStream& ByteMemoryInStream::operator>> (std::string& str) { //std::size_t sz = 0u; uint32_t sz = 0u; *this >> sz; @@ -814,24 +892,31 @@ I::IStream& ByteMemoryInStream::operator>> (std::string& str) { return *this; } -GAPI_EXPORTS void serialize(I::OStream& os, const cv::GMetaArgs &ma) { +GAPI_EXPORTS std::unique_ptr detail::getInStream(const std::vector &p) { + return std::unique_ptr(new ByteMemoryInStream(p)); +} + +GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca) { + os << ca; +} + +GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma) { os << ma; } -GAPI_EXPORTS void serialize(I::OStream& os, const cv::GRunArgs &ra) { +GAPI_EXPORTS void serialize(IOStream& os, const cv::GRunArgs &ra) { os << ra; } -GAPI_EXPORTS GMetaArgs meta_args_deserialize(I::IStream& is) { +GAPI_EXPORTS GMetaArgs meta_args_deserialize(IIStream& is) { GMetaArgs s; is >> s; return s; } -GAPI_EXPORTS GRunArgs run_args_deserialize(I::IStream& is) { +GAPI_EXPORTS GRunArgs run_args_deserialize(IIStream& is) { GRunArgs s; is >> s; return s; } - } // namespace s11n -} // namespace gimpl +} // namespace gapi } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.hpp 
b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.hpp index 9286bc4391e..a3134d84d28 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/common/serialization.hpp @@ -9,19 +9,21 @@ #include #include -#include +#include +#include #include // used in the vector<> #include "compiler/gmodel.hpp" #include "opencv2/gapi/render/render_types.hpp" +#include "opencv2/gapi/s11n.hpp" // basic interfaces #if (defined _WIN32 || defined _WIN64) && defined _MSC_VER #pragma warning(disable: 4702) #endif namespace cv { -namespace gimpl { +namespace gapi { namespace s11n { struct GSerialized { @@ -31,168 +33,107 @@ struct GSerialized { cv::gimpl::Protocol m_proto; }; -//////////////////////////////////////////////////////////////////////////////// -// Stream interfaces, so far temporary -namespace I { - struct GAPI_EXPORTS OStream { - virtual ~OStream() = default; - - // Define the native support for basic C++ types at the API level: - virtual OStream& operator<< (bool) = 0; - virtual OStream& operator<< (char) = 0; - virtual OStream& operator<< (unsigned char) = 0; - virtual OStream& operator<< (short) = 0; - virtual OStream& operator<< (unsigned short) = 0; - virtual OStream& operator<< (int) = 0; - //virtual OStream& operator<< (std::size_t) = 0; - virtual OStream& operator<< (uint32_t) = 0; - virtual OStream& operator<< (float) = 0; - virtual OStream& operator<< (double) = 0; - virtual OStream& operator<< (const std::string&) = 0; - }; - - struct GAPI_EXPORTS IStream { - virtual ~IStream() = default; - - virtual IStream& operator>> (bool &) = 0; - virtual IStream& operator>> (char &) = 0; - virtual IStream& operator>> (unsigned char &) = 0; - virtual IStream& operator>> (short &) = 0; - virtual IStream& operator>> (unsigned short &) = 0; - virtual IStream& operator>> (int &) = 0; - virtual IStream& operator>> (float &) = 0; - virtual 
IStream& operator>> (double &) = 0; - //virtual IStream& operator>> (std::size_t &) = 0; - virtual IStream& operator >> (uint32_t &) = 0; - virtual IStream& operator>> (std::string &) = 0; - }; -} // namespace I - //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // S11N operators -// Note: operators for basic types are defined in IStream/OStream - -// OpenCV types //////////////////////////////////////////////////////////////// - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Point &pt); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Point &pt); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Size &sz); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Size &sz); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Rect &rc); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Rect &rc); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Scalar &s); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Scalar &s); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::Mat &m); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::Mat &m); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Text &t); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Text &t); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream&, const cv::gapi::wip::draw::FText &); -GAPI_EXPORTS I::IStream& operator>> (I::IStream&, cv::gapi::wip::draw::FText &); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Circle &c); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Circle &c); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Rect &r); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Rect &r); - 
-GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Image &i); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Image &i); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Mosaic &m); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Mosaic &m); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Poly &p); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Poly &p); - -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::draw::Line &l); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::draw::Line &l); +// Note: operators for basic types are defined in IIStream/IOStream // G-API types ///////////////////////////////////////////////////////////////// -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::util::monostate ); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::util::monostate &); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::GShape shape); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GShape &shape); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::util::monostate ); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::util::monostate &); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::detail::ArgKind k); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::ArgKind &k); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::GShape shape); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GShape &shape); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::detail::OpaqueKind k); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::OpaqueKind &k); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::detail::ArgKind k); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::ArgKind 
&k); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, cv::gimpl::Data::Storage s); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Data::Storage &s); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::detail::OpaqueKind k); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::OpaqueKind &k); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::DataObjectCounter &c); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::DataObjectCounter &c); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::gimpl::Data::Storage s); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Data::Storage &s); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::Protocol &p); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Protocol &p); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::DataObjectCounter &c); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::DataObjectCounter &c); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GArg &arg); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GArg &arg); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::Protocol &p); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Protocol &p); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GArg &arg); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GArg &arg); //Forward declaration -//I::OStream& operator<< (I::OStream& os, const cv::GRunArg &arg); -//I::IStream& operator>> (I::IStream& is, cv::GRunArg &arg); +//IOStream& operator<< (IOStream& os, const cv::GRunArg &arg); +//IIStream& operator>> (IIStream& is, cv::GRunArg &arg); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GKernel &k); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GKernel &k); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GKernel &k); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, 
cv::GKernel &k); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GMatDesc &d); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GMatDesc &d); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GMatDesc &d); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GMatDesc &d); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GScalarDesc &); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GScalarDesc &); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GScalarDesc &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GScalarDesc &); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GOpaqueDesc &); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GOpaqueDesc &); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GOpaqueDesc &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GOpaqueDesc &); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::GArrayDesc &); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::GArrayDesc &); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GArrayDesc &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GArrayDesc &); -#if !defined(GAPI_STANDALONE) -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::UMat &); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::UMat &); -#endif // !defined(GAPI_STANDALONE) +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GFrameDesc &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GFrameDesc &); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gapi::wip::IStreamSource::Ptr &); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gapi::wip::IStreamSource::Ptr &); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::RcDesc &rc); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::RcDesc &rc); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::detail::VectorRef &); 
-GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::VectorRef &); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::Op &op); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Op &op); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::detail::OpaqueRef &); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::detail::OpaqueRef &); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::Data &op); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::Data &op); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::RcDesc &rc); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::RcDesc &rc); +// Render types //////////////////////////////////////////////////////////////// -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::Op &op); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Op &op); +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Text &t); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Text &t); -GAPI_EXPORTS I::OStream& operator<< (I::OStream& os, const cv::gimpl::Data &op); -GAPI_EXPORTS I::IStream& operator>> (I::IStream& is, cv::gimpl::Data &op); +GAPI_EXPORTS IOStream& operator<< (IOStream&, const cv::gapi::wip::draw::FText &); +GAPI_EXPORTS IIStream& operator>> (IIStream&, cv::gapi::wip::draw::FText &); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Circle &c); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Circle &c); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Rect &r); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Rect &r); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Image &i); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Image &i); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, 
const cv::gapi::wip::draw::Mosaic &m); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Mosaic &m); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Poly &p); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Poly &p); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::draw::Line &l); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Line &l); // The top-level serialization routine. // Note it is just a single function which takes a GModel and a list of nodes // and writes the data to the stream (recursively) -GAPI_EXPORTS void serialize( I::OStream& os +GAPI_EXPORTS void serialize( IOStream& os , const ade::Graph &g , const std::vector &nodes); // The top-level serialization routine. // Note it is just a single function which takes a GModel and a list of nodes // and writes the data to the stream (recursively) -GAPI_EXPORTS void serialize( I::OStream& os +GAPI_EXPORTS void serialize( IOStream& os , const ade::Graph &g , const cv::gimpl::Protocol &p , const std::vector &nodes); @@ -214,160 +155,73 @@ GAPI_EXPORTS void serialize( I::OStream& os // Summarizing, the `deserialize()` happens *once per GComputation* immediately // during the cv::gapi::deserialize(), and `reconstruct()` happens // on every compilation process issued for this GComputation. 
-GAPI_EXPORTS GSerialized deserialize(I::IStream& is); +GAPI_EXPORTS GSerialized deserialize(IIStream& is); GAPI_EXPORTS void reconstruct(const GSerialized &s, ade::Graph &g); -// Legacy ////////////////////////////////////////////////////////////////////// -// Generic: unordered_map serialization //////////////////////////////////////// -template -I::OStream& operator<< (I::OStream& os, const std::unordered_map &m) { - //const std::size_t sz = m.size(); // explicitly specify type - const uint32_t sz = (uint32_t)m.size(); // explicitly specify type - os << sz; - for (auto &&it : m) os << it.first << it.second; - return os; -} -template -I::IStream& operator>> (I::IStream& is, std::unordered_map &m) { - m.clear(); - //std::size_t sz = 0u; - uint32_t sz = 0u; - is >> sz; - if (sz != 0u) { - for (auto &&i : ade::util::iota(sz)) { - (void) i; - K k{}; - V v{}; - is >> k >> v; - m.insert({k,v}); - } - GAPI_Assert(sz == m.size()); - } - return is; -} - -// Generic: variant serialization ////////////////////////////////////////////// -namespace detail { // FIXME: breaks old code -template -I::OStream& put_v(I::OStream&, const V&, std::size_t) { - GAPI_Assert(false && "variant>>: requested index is invalid"); -}; -template -I::OStream& put_v(I::OStream& os, const V& v, std::size_t x) { - return (x == 0u) - ? 
os << cv::util::get(v) - : put_v(os, v, x-1); -} -template -I::IStream& get_v(I::IStream&, V&, std::size_t, std::size_t) { - GAPI_Assert(false && "variant<<: requested index is invalid"); -} -template -I::IStream& get_v(I::IStream& is, V& v, std::size_t i, std::size_t gi) { - if (i == gi) { - X x{}; - is >> x; - v = std::move(x); - return is; - } else return get_v(is, v, i+1, gi); -} -} // namespace detail FIXME: breaks old code - -template -I::OStream& operator<< (I::OStream& os, const cv::util::variant &v) { - os << (uint32_t)v.index(); - return detail::put_v, Ts...>(os, v, v.index()); -} -template -I::IStream& operator>> (I::IStream& is, cv::util::variant &v) { - int idx = -1; - is >> idx; - GAPI_Assert(idx >= 0 && idx < (int)sizeof...(Ts)); - return detail::get_v, Ts...>(is, v, 0u, idx); -} - -// Generic: vector serialization /////////////////////////////////////////////// -// Moved here to fix CLang issues https://clang.llvm.org/compatibility.html -// Unqualified lookup in templates -template -I::OStream& operator<< (I::OStream& os, const std::vector &ts) { - //const std::size_t sz = ts.size(); // explicitly specify type - const uint32_t sz = (uint32_t)ts.size(); // explicitly specify type - os << sz; - for (auto &&v : ts) os << v; - return os; -} -template -I::IStream& operator >> (I::IStream& is, std::vector &ts) { - //std::size_t sz = 0u; - uint32_t sz = 0u; - is >> sz; - if (sz == 0u) { - ts.clear(); - } - else { - ts.resize(sz); - for (auto &&i : ade::util::iota(sz)) is >> ts[i]; - } - return is; -} - // FIXME: Basic Stream implementaions ////////////////////////////////////////// // Basic in-memory stream implementations. 
-class GAPI_EXPORTS ByteMemoryOutStream final: public I::OStream { +class GAPI_EXPORTS ByteMemoryOutStream final: public IOStream { std::vector m_storage; - //virtual I::OStream& operator << (uint32_t) override; - //virtual I::OStream& operator<< (uint32_t) final; + //virtual IOStream& operator << (uint32_t) override; + //virtual IOStream& operator<< (uint32_t) final; public: const std::vector& data() const; - virtual I::OStream& operator<< (bool) override; - virtual I::OStream& operator<< (char) override; - virtual I::OStream& operator<< (unsigned char) override; - virtual I::OStream& operator<< (short) override; - virtual I::OStream& operator<< (unsigned short) override; - virtual I::OStream& operator<< (int) override; - //virtual I::OStream& operator<< (std::size_t) override; - virtual I::OStream& operator<< (float) override; - virtual I::OStream& operator<< (double) override; - virtual I::OStream& operator<< (const std::string&) override; - virtual I::OStream& operator<< (uint32_t) override; + virtual IOStream& operator<< (bool) override; + virtual IOStream& operator<< (char) override; + virtual IOStream& operator<< (unsigned char) override; + virtual IOStream& operator<< (short) override; + virtual IOStream& operator<< (unsigned short) override; + virtual IOStream& operator<< (int) override; + //virtual IOStream& operator<< (std::size_t) override; + virtual IOStream& operator<< (float) override; + virtual IOStream& operator<< (double) override; + virtual IOStream& operator<< (const std::string&) override; + virtual IOStream& operator<< (uint32_t) override; + virtual IOStream& operator<< (uint64_t) override; }; -class GAPI_EXPORTS ByteMemoryInStream final: public I::IStream { +class GAPI_EXPORTS ByteMemoryInStream final: public IIStream { const std::vector& m_storage; size_t m_idx = 0u; void check(std::size_t n) { (void) n; GAPI_DbgAssert(m_idx+n-1 < m_storage.size()); } uint32_t getU32() { uint32_t v{}; *this >> v; return v; }; - //virtual I::IStream& 
operator>> (uint32_t &) final; + //virtual IIStream& operator>> (uint32_t &) final; public: explicit ByteMemoryInStream(const std::vector &data); - virtual I::IStream& operator>> (bool &) override; - virtual I::IStream& operator>> (char &) override; - virtual I::IStream& operator>> (unsigned char &) override; - virtual I::IStream& operator>> (short &) override; - virtual I::IStream& operator>> (unsigned short &) override; - virtual I::IStream& operator>> (int &) override; - virtual I::IStream& operator>> (float &) override; - virtual I::IStream& operator>> (double &) override; - //virtual I::IStream& operator>> (std::size_t &) override; - virtual I::IStream& operator >> (uint32_t &) override; - virtual I::IStream& operator>> (std::string &) override; + virtual IIStream& operator>> (bool &) override; + virtual IIStream& operator>> (std::vector::reference) override; + virtual IIStream& operator>> (char &) override; + virtual IIStream& operator>> (unsigned char &) override; + virtual IIStream& operator>> (short &) override; + virtual IIStream& operator>> (unsigned short &) override; + virtual IIStream& operator>> (int &) override; + virtual IIStream& operator>> (float &) override; + virtual IIStream& operator>> (double &) override; + //virtual IIStream& operator>> (std::size_t &) override; + virtual IIStream& operator >> (uint32_t &) override; + virtual IIStream& operator >> (uint64_t &) override; + virtual IIStream& operator>> (std::string &) override; }; -GAPI_EXPORTS void serialize(I::OStream& os, const cv::GMetaArgs &ma); -GAPI_EXPORTS void serialize(I::OStream& os, const cv::GRunArgs &ra); -GAPI_EXPORTS GMetaArgs meta_args_deserialize(I::IStream& is); -GAPI_EXPORTS GRunArgs run_args_deserialize(I::IStream& is); +namespace detail { +GAPI_EXPORTS std::unique_ptr getInStream(const std::vector &p); +} // namespace detail + +GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca); +GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma); 
+GAPI_EXPORTS void serialize(IOStream& os, const cv::GRunArgs &ra); +GAPI_EXPORTS GMetaArgs meta_args_deserialize(IIStream& is); +GAPI_EXPORTS GRunArgs run_args_deserialize(IIStream& is); } // namespace s11n -} // namespace gimpl +} // namespace gapi } // namespace cv #endif // OPENCV_GAPI_COMMON_SERIALIZATION_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpubackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpubackend.cpp index 780c0e9241f..5e2540365b3 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpubackend.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpubackend.cpp @@ -128,9 +128,10 @@ cv::GArg cv::gimpl::GCPUExecutable::packArg(const GArg &arg) // No API placeholders allowed at this point // FIXME: this check has to be done somewhere in compilation stage. GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT - && arg.kind != cv::detail::ArgKind::GSCALAR - && arg.kind != cv::detail::ArgKind::GARRAY - && arg.kind != cv::detail::ArgKind::GOPAQUE); + && arg.kind != cv::detail::ArgKind::GSCALAR + && arg.kind != cv::detail::ArgKind::GARRAY + && arg.kind != cv::detail::ArgKind::GOPAQUE + && arg.kind != cv::detail::ArgKind::GFRAME); if (arg.kind != cv::detail::ArgKind::GOBJREF) { @@ -150,6 +151,7 @@ cv::GArg cv::gimpl::GCPUExecutable::packArg(const GArg &arg) // (and constructed by either bindIn/Out or resetInternal) case GShape::GARRAY: return GArg(m_res.slot().at(ref.id)); case GShape::GOPAQUE: return GArg(m_res.slot().at(ref.id)); + case GShape::GFRAME: return GArg(m_res.slot().at(ref.id)); default: util::throw_error(std::logic_error("Unsupported GShape type")); break; @@ -276,4 +278,8 @@ void cv::gimpl::GCPUExecutable::run(std::vector &&input_objs, } // for(m_script) for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second); + + // In/Out args clean-up is mandatory now with RMat + for (auto &it : input_objs) 
magazine::unbind(m_res, it.first); + for (auto &it : output_objs) magazine::unbind(m_res, it.first); } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpucore.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpucore.cpp index d9c3c3ae2a3..3e6ce1c1d48 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpucore.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpucore.cpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. // -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #include "precomp.hpp" @@ -342,6 +342,14 @@ GAPI_OCV_KERNEL(GCPUSum, cv::gapi::core::GSum) } }; +GAPI_OCV_KERNEL(GCPUCountNonZero, cv::gapi::core::GCountNonZero) +{ + static void run(const cv::Mat& in, int& out) + { + out = cv::countNonZero(in); + } +}; + GAPI_OCV_KERNEL(GCPUAddW, cv::gapi::core::GAddW) { static void run(const cv::Mat& in1, double alpha, const cv::Mat& in2, double beta, double gamma, int dtype, cv::Mat& out) @@ -577,6 +585,63 @@ GAPI_OCV_KERNEL(GCPUWarpAffine, cv::gapi::core::GWarpAffine) } }; +GAPI_OCV_KERNEL(GCPUKMeansND, cv::gapi::core::GKMeansND) +{ + static void run(const cv::Mat& data, const int K, const cv::Mat& inBestLabels, + const cv::TermCriteria& criteria, const int attempts, + const cv::KmeansFlags flags, + double& compactness, cv::Mat& outBestLabels, cv::Mat& centers) + { + if (flags & cv::KMEANS_USE_INITIAL_LABELS) + { + inBestLabels.copyTo(outBestLabels); + } + compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers); + } +}; + +GAPI_OCV_KERNEL(GCPUKMeansNDNoInit, cv::gapi::core::GKMeansNDNoInit) +{ + static void run(const cv::Mat& data, const int K, const cv::TermCriteria& criteria, + const int attempts, const cv::KmeansFlags flags, + double& compactness, cv::Mat& outBestLabels, 
cv::Mat& centers) + { + compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers); + } +}; + +GAPI_OCV_KERNEL(GCPUKMeans2D, cv::gapi::core::GKMeans2D) +{ + static void run(const std::vector& data, const int K, + const std::vector& inBestLabels, const cv::TermCriteria& criteria, + const int attempts, const cv::KmeansFlags flags, + double& compactness, std::vector& outBestLabels, + std::vector& centers) + { + if (flags & cv::KMEANS_USE_INITIAL_LABELS) + { + outBestLabels = inBestLabels; + } + compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers); + } +}; + +GAPI_OCV_KERNEL(GCPUKMeans3D, cv::gapi::core::GKMeans3D) +{ + static void run(const std::vector& data, const int K, + const std::vector& inBestLabels, const cv::TermCriteria& criteria, + const int attempts, const cv::KmeansFlags flags, + double& compactness, std::vector& outBestLabels, + std::vector& centers) + { + if (flags & cv::KMEANS_USE_INITIAL_LABELS) + { + outBestLabels = inBestLabels; + } + compactness = cv::kmeans(data, K, outBestLabels, criteria, attempts, flags, centers); + } +}; + GAPI_OCV_KERNEL(GCPUParseSSDBL, cv::gapi::nn::parsers::GParseSSDBL) { static void run(const cv::Mat& in_ssd_result, @@ -617,7 +682,7 @@ GAPI_OCV_KERNEL(GCPUParseYolo, cv::gapi::nn::parsers::GParseYolo) } }; -GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize) +GAPI_OCV_KERNEL(GCPUSize, cv::gapi::streaming::GSize) { static void run(const cv::Mat& in, cv::Size& out) { @@ -626,7 +691,7 @@ GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize) } }; -GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::core::GSizeR) +GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::streaming::GSizeR) { static void run(const cv::Rect& in, cv::Size& out) { @@ -679,6 +744,7 @@ cv::gapi::GKernelPackage cv::gapi::core::cpu::kernels() , GCPUAbsDiff , GCPUAbsDiffC , GCPUSum + , GCPUCountNonZero , GCPUAddW , GCPUNormL1 , GCPUNormL2 @@ -705,6 +771,10 @@ cv::gapi::GKernelPackage cv::gapi::core::cpu::kernels() , GCPUNormalize , 
GCPUWarpPerspective , GCPUWarpAffine + , GCPUKMeansND + , GCPUKMeansNDNoInit + , GCPUKMeans2D + , GCPUKMeans3D , GCPUParseSSDBL , GOCVParseSSD , GCPUParseYolo diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuimgproc.cpp index 8104565f03d..6cbf0d32f06 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuimgproc.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuimgproc.cpp @@ -145,6 +145,16 @@ GAPI_OCV_KERNEL(GCPUDilate, cv::gapi::imgproc::GDilate) } }; +GAPI_OCV_KERNEL(GCPUMorphologyEx, cv::gapi::imgproc::GMorphologyEx) +{ + static void run(const cv::Mat &in, const cv::MorphTypes op, const cv::Mat &kernel, + const cv::Point &anchor, const int iterations, + const cv::BorderTypes borderType, const cv::Scalar &borderValue, cv::Mat &out) + { + cv::morphologyEx(in, out, op, kernel, anchor, iterations, borderType, borderValue); + } +}; + GAPI_OCV_KERNEL(GCPUSobel, cv::gapi::imgproc::GSobel) { static void run(const cv::Mat& in, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType, @@ -211,6 +221,182 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures) } }; +GAPI_OCV_KERNEL(GCPUFindContours, cv::gapi::imgproc::GFindContours) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, const cv::Point& offset, + std::vector> &outConts) + { + cv::findContours(image, outConts, mode, method, offset); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursNoOffset, cv::gapi::imgproc::GFindContoursNoOffset) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, + std::vector> &outConts) + { + cv::findContours(image, outConts, mode, method); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursH, cv::gapi::imgproc::GFindContoursH) +{ + static void run(const cv::Mat& 
image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, const cv::Point& offset, + std::vector> &outConts, std::vector &outHier) + { + cv::findContours(image, outConts, outHier, mode, method, offset); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursHNoOffset, cv::gapi::imgproc::GFindContoursHNoOffset) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, + std::vector> &outConts, std::vector &outHier) + { + cv::findContours(image, outConts, outHier, mode, method); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectMat, cv::gapi::imgproc::GBoundingRectMat) +{ + static void run(const cv::Mat& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectVector32S, cv::gapi::imgproc::GBoundingRectVector32S) +{ + static void run(const std::vector& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVector32F) +{ + static void run(const std::vector& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine2DMat, cv::gapi::imgproc::GFitLine2DMat) +{ + static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param, + const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine2DVector32S, cv::gapi::imgproc::GFitLine2DVector32S) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine2DVector32F, cv::gapi::imgproc::GFitLine2DVector32F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, out, distType, param, reps, 
aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine2DVector64F, cv::gapi::imgproc::GFitLine2DVector64F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DMat, cv::gapi::imgproc::GFitLine3DMat) +{ + static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param, + const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DVector32S, cv::gapi::imgproc::GFitLine3DVector32S) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DVector32F, cv::gapi::imgproc::GFitLine3DVector32F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DVector64F, cv::gapi::imgproc::GFitLine3DVector64F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_BGR2RGB); + } +}; + +GAPI_OCV_KERNEL(GCPUBGR2I420, cv::gapi::imgproc::GBGR2I420) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_BGR2YUV_I420); + } +}; + +GAPI_OCV_KERNEL(GCPURGB2I420, cv::gapi::imgproc::GRGB2I420) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, 
out, cv::COLOR_RGB2YUV_I420); + } +}; + +GAPI_OCV_KERNEL(GCPUI4202BGR, cv::gapi::imgproc::GI4202BGR) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_YUV2BGR_I420); + } +}; + +GAPI_OCV_KERNEL(GCPUI4202RGB, cv::gapi::imgproc::GI4202RGB) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_YUV2RGB_I420); + } +}; + GAPI_OCV_KERNEL(GCPURGB2YUV, cv::gapi::imgproc::GRGB2YUV) { static void run(const cv::Mat& in, cv::Mat &out) @@ -438,6 +624,7 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels() , GCPUMedianBlur , GCPUErode , GCPUDilate + , GCPUMorphologyEx , GCPUSobel , GCPUSobelXY , GCPULaplacian @@ -445,8 +632,28 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels() , GCPUCanny , GCPUGoodFeatures , GCPUEqualizeHist + , GCPUFindContours + , GCPUFindContoursNoOffset + , GCPUFindContoursH + , GCPUFindContoursHNoOffset + , GCPUBGR2RGB , GCPURGB2YUV + , GCPUBoundingRectMat + , GCPUBoundingRectVector32S + , GCPUBoundingRectVector32F + , GCPUFitLine2DMat + , GCPUFitLine2DVector32S + , GCPUFitLine2DVector32F + , GCPUFitLine2DVector64F + , GCPUFitLine3DMat + , GCPUFitLine3DVector32S + , GCPUFitLine3DVector32F + , GCPUFitLine3DVector64F , GCPUYUV2RGB + , GCPUBGR2I420 + , GCPURGB2I420 + , GCPUI4202BGR + , GCPUI4202RGB , GCPUNV12toRGB , GCPUNV12toBGR , GCPURGB2Lab diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpukernel.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpukernel.cpp index 0d8d7379b67..4497952c87f 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpukernel.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpukernel.cpp @@ -41,6 +41,11 @@ cv::detail::OpaqueRef& cv::GCPUContext::outOpaqueRef(int output) return util::get(m_results.at(output)); } +cv::MediaFrame& cv::GCPUContext::outFrame(int output) +{ + return *util::get(m_results.at(output)); +} + 
cv::GCPUKernel::GCPUKernel() { } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuvideo.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuvideo.cpp index ac8e9e40036..075b5f9ad50 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuvideo.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gcpuvideo.cpp @@ -80,12 +80,109 @@ GAPI_OCV_KERNEL(GCPUCalcOptFlowLKForPyr, cv::gapi::video::GCalcOptFlowLKForPyr) } }; +GAPI_OCV_KERNEL_ST(GCPUBackgroundSubtractor, + cv::gapi::video::GBackgroundSubtractor, + cv::BackgroundSubtractor) +{ + static void setup(const cv::GMatDesc&, const cv::gapi::video::BackgroundSubtractorParams& bsParams, + std::shared_ptr& state, + const cv::GCompileArgs&) + { + if (bsParams.operation == cv::gapi::video::TYPE_BS_MOG2) + state = cv::createBackgroundSubtractorMOG2(bsParams.history, + bsParams.threshold, + bsParams.detectShadows); + else if (bsParams.operation == cv::gapi::video::TYPE_BS_KNN) + state = cv::createBackgroundSubtractorKNN(bsParams.history, + bsParams.threshold, + bsParams.detectShadows); + + GAPI_Assert(state); + } + + static void run(const cv::Mat& in, const cv::gapi::video::BackgroundSubtractorParams& bsParams, + cv::Mat &out, cv::BackgroundSubtractor& state) + { + state.apply(in, out, bsParams.learningRate); + } +}; + +GAPI_OCV_KERNEL_ST(GCPUKalmanFilter, cv::gapi::video::GKalmanFilter, cv::KalmanFilter) +{ + static void setup(const cv::GMatDesc&, const cv::GOpaqueDesc&, + const cv::GMatDesc&, const cv::gapi::KalmanParams& kfParams, + std::shared_ptr &state, const cv::GCompileArgs&) + { + state = std::make_shared(kfParams.transitionMatrix.rows, kfParams.measurementMatrix.rows, + kfParams.controlMatrix.cols, kfParams.transitionMatrix.type()); + + // initial state + state->statePost = kfParams.state; + state->errorCovPost = kfParams.errorCov; + + // dynamic system initialization + state->controlMatrix = kfParams.controlMatrix; + 
state->measurementMatrix = kfParams.measurementMatrix; + state->transitionMatrix = kfParams.transitionMatrix; + state->processNoiseCov = kfParams.processNoiseCov; + state->measurementNoiseCov = kfParams.measurementNoiseCov; + } + + static void run(const cv::Mat& measurements, bool haveMeasurement, + const cv::Mat& control, const cv::gapi::KalmanParams&, + cv::Mat &out, cv::KalmanFilter& state) + { + cv::Mat pre = state.predict(control); + + if (haveMeasurement) + state.correct(measurements).copyTo(out); + else + pre.copyTo(out); + } +}; + +GAPI_OCV_KERNEL_ST(GCPUKalmanFilterNoControl, cv::gapi::video::GKalmanFilterNoControl, cv::KalmanFilter) +{ + static void setup(const cv::GMatDesc&, const cv::GOpaqueDesc&, + const cv::gapi::KalmanParams& kfParams, + std::shared_ptr &state, + const cv::GCompileArgs&) + { + state = std::make_shared(kfParams.transitionMatrix.rows, kfParams.measurementMatrix.rows, + 0, kfParams.transitionMatrix.type()); + // initial state + state->statePost = kfParams.state; + state->errorCovPost = kfParams.errorCov; + + // dynamic system initialization + state->measurementMatrix = kfParams.measurementMatrix; + state->transitionMatrix = kfParams.transitionMatrix; + state->processNoiseCov = kfParams.processNoiseCov; + state->measurementNoiseCov = kfParams.measurementNoiseCov; + } + + static void run(const cv::Mat& measurements, bool haveMeasurement, + const cv::gapi::KalmanParams&, cv::Mat &out, + cv::KalmanFilter& state) + { + cv::Mat pre = state.predict(); + + if (haveMeasurement) + state.correct(measurements).copyTo(out); + else + pre.copyTo(out); + } +}; + cv::gapi::GKernelPackage cv::gapi::video::cpu::kernels() { static auto pkg = cv::gapi::kernels < GCPUBuildOptFlowPyramid , GCPUCalcOptFlowLK , GCPUCalcOptFlowLKForPyr + , GCPUBackgroundSubtractor + , GCPUKalmanFilter + , GCPUKalmanFilterNoControl >(); return pkg; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gnnparsers.cpp 
b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gnnparsers.cpp index 234382d530d..a5e4bf5f856 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gnnparsers.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/cpu/gnnparsers.cpp @@ -246,6 +246,28 @@ void parseSSD(const cv::Mat& in_ssd_result, } } +static void checkYoloDims(const MatSize& dims) { + const auto d = dims.dims(); + // Accept 1x13x13xN and 13x13xN + GAPI_Assert(d >= 2); + if (d >= 3) { + if (dims[d-2] == 13) { + GAPI_Assert(dims[d-1]%5 == 0); + GAPI_Assert(dims[d-2] == 13); + GAPI_Assert(dims[d-3] == 13); + for (int i = 0; i < d-3; i++) { + GAPI_Assert(dims[i] == 1); + } + return; + } + } + // Accept 1x1x1xN, 1x1xN, 1xN + GAPI_Assert(dims[d-1]%(5*13*13) == 0); + for (int i = 0; i < d-1; i++) { + GAPI_Assert(dims[i] == 1); + } +} + void parseYolo(const cv::Mat& in_yolo_result, const cv::Size& in_size, const float confidence_threshold, @@ -255,12 +277,12 @@ void parseYolo(const cv::Mat& in_yolo_result, std::vector& out_labels) { const auto& dims = in_yolo_result.size; - GAPI_Assert(dims.dims() == 4); - GAPI_Assert(dims[0] == 1); - GAPI_Assert(dims[1] == 13); - GAPI_Assert(dims[2] == 13); - GAPI_Assert(dims[3] % 5 == 0); // 5 boxes - const auto num_classes = dims[3] / 5 - 5; + checkYoloDims(dims); + int acc = 1; + for (int i = 0; i < dims.dims(); i++) { + acc *= dims[i]; + } + const auto num_classes = acc/(5*13*13)-5; GAPI_Assert(num_classes > 0); GAPI_Assert(0 < nms_threshold && nms_threshold <= 1); out_boxes.clear(); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.cpp index e103c70f787..030bb101989 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.cpp @@ -952,7 +952,7 @@ namespace using 
namespace cv::gimpl; GModel::Graph g(graph); GFluidModel fg(graph); - for (const auto node : g.nodes()) + for (const auto& node : g.nodes()) { if (g.metadata(node).get().t == NodeType::DATA) { @@ -1243,41 +1243,25 @@ void cv::gimpl::GFluidExecutable::reshape(ade::Graph &g, const GCompileArgs &arg // FIXME: Document what it does void cv::gimpl::GFluidExecutable::bindInArg(const cv::gimpl::RcDesc &rc, const GRunArg &arg) { - switch (rc.shape) - { - case GShape::GMAT: m_buffers[m_id_map.at(rc.id)].priv().bindTo(util::get(arg), true); break; - case GShape::GSCALAR: m_res.slot()[rc.id] = util::get(arg); break; - case GShape::GARRAY: m_res.slot()[rc.id] = util::get(arg); break; - case GShape::GOPAQUE: m_res.slot()[rc.id] = util::get(arg); break; - default: util::throw_error(std::logic_error("Unsupported input GShape type")); + magazine::bindInArg(m_res, rc, arg); + if (rc.shape == GShape::GMAT) { + auto& mat = m_res.slot()[rc.id]; + // fluid::Buffer::bindTo() is not connected to magazine::bindIn/OutArg and unbind() calls, + // it's simply called each run() without any requirement to call some fluid-specific + // unbind() at the end of run() + m_buffers[m_id_map.at(rc.id)].priv().bindTo(mat, true); } } void cv::gimpl::GFluidExecutable::bindOutArg(const cv::gimpl::RcDesc &rc, const GRunArgP &arg) { // Only GMat is supported as return type - using T = GRunArgP; - switch (rc.shape) - { - case GShape::GMAT: - { - cv::GMatDesc desc = m_buffers[m_id_map.at(rc.id)].meta(); - auto &bref = m_buffers[m_id_map.at(rc.id)].priv(); - - switch (arg.index()) { - // FIXME: See the bindInArg comment on Streaming-related changes - case T::index_of(): { - auto &outMat = *util::get(arg); - GAPI_Assert(outMat.data != nullptr); - GAPI_Assert(cv::descr_of(outMat) == desc && "Output argument was not preallocated as it should be ?"); - bref.bindTo(outMat, false); - } break; - default: GAPI_Assert(false); - } // switch(arg.index()) - break; - } - default: 
util::throw_error(std::logic_error("Unsupported return GShape type")); + if (rc.shape != GShape::GMAT) { + util::throw_error(std::logic_error("Unsupported return GShape type")); } + magazine::bindOutArg(m_res, rc, arg); + auto& mat = m_res.slot()[rc.id]; + m_buffers[m_id_map.at(rc.id)].priv().bindTo(mat, false); } void cv::gimpl::GFluidExecutable::packArg(cv::GArg &in_arg, const cv::GArg &op_arg) @@ -1383,6 +1367,10 @@ void cv::gimpl::GFluidExecutable::run(std::vector &input_objs, agent->doWork(); } } + + // In/Out args clean-up is mandatory now with RMat + for (auto &it : input_objs) magazine::unbind(m_res, it.first); + for (auto &it : output_objs) magazine::unbind(m_res, it.first); } cv::gimpl::GParallelFluidExecutable::GParallelFluidExecutable(const ade::Graph &g, @@ -1452,7 +1440,7 @@ void GFluidBackendImpl::addMetaSensitiveBackendPasses(ade::ExecutionEngineSetupC { // Add FluidData to all data nodes inside island, // set internal = true if node is not a slot in terms of higher-level GIslandModel - for (const auto node : isl->contents()) + for (const auto& node : isl->contents()) { if (g.metadata(node).get().t == NodeType::DATA && !fg.metadata(node).contains()) diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.hpp index 282b0668b2d..43174cc1d31 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidbackend.hpp @@ -128,8 +128,7 @@ class GFluidExecutable final: public GIslandExecutable std::vector m_script; - using Magazine = detail::magazine; - Magazine m_res; + cv::gimpl::Mag m_res; std::size_t m_num_int_buffers; // internal buffers counter (m_buffers - num_scratch) std::vector m_scratch_users; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidcore.cpp 
b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidcore.cpp index a6f8d56e4c5..edc91f01797 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidcore.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/fluid/gfluidcore.cpp @@ -151,6 +151,348 @@ GAPI_FLUID_KERNEL(GFluidAddW, cv::gapi::core::GAddW, false) enum Arithm { ARITHM_ABSDIFF, ARITHM_ADD, ARITHM_SUBTRACT, ARITHM_MULTIPLY, ARITHM_DIVIDE }; +#if CV_SIMD +CV_ALWAYS_INLINE void absdiff_store(short out[], const v_int16& a, const v_int16& b, int x) +{ + vx_store(&out[x], v_absdiffs(a, b)); +} + +CV_ALWAYS_INLINE void absdiff_store(ushort out[], const v_uint16& a, const v_uint16& b, int x) +{ + vx_store(&out[x], v_absdiff(a, b)); +} + +CV_ALWAYS_INLINE void absdiff_store(uchar out[], const v_uint8& a, const v_uint8& b, int x) +{ + vx_store(&out[x], v_absdiff(a, b)); +} + +CV_ALWAYS_INLINE void absdiff_store(float out[], const v_float32& a, const v_float32& b, int x) +{ + vx_store(&out[x], v_absdiff(a, b)); +} + +template +CV_ALWAYS_INLINE int absdiff_impl(const T in1[], const T in2[], T out[], int length) +{ + constexpr int nlanes = static_cast(VT::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + VT a = vx_load(&in1[x]); + VT b = vx_load(&in2[x]); + absdiff_store(out, a, b, x); + } + + if (x < length && (in1 != out) && (in2 != out)) + { + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; +} + +template +CV_ALWAYS_INLINE int absdiff_simd(const T in1[], const T in2[], T out[], int length) +{ + if (std::is_same::value) + { + return absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return 
absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + + return 0; +} + +template +CV_ALWAYS_INLINE int add_simd_sametype(const T in1[], const T in2[], T out[], int length) +{ + constexpr int nlanes = static_cast(VT::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + VT a = vx_load(&in1[x]); + VT b = vx_load(&in2[x]); + vx_store(&out[x], a + b); + } + + if (x < length && (in1 != out) && (in2 != out)) + { + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; +} + +template +CV_ALWAYS_INLINE int add_simd(const SRC in1[], const SRC in2[], DST out[], int length) +{ + if (std::is_same::value && !std::is_same::value) + return 0; + + if (std::is_same::value) + { + if (std::is_same::value) + { + return add_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return add_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return add_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_int16 a1 = vx_load(reinterpret_cast(&in1[x])); + v_int16 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 2])); + v_int16 b1 = vx_load(reinterpret_cast(&in2[x])); + v_int16 b2 = vx_load(reinterpret_cast(&in2[x + nlanes / 2])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(a1 + b1, a2 + b2)); + } + + if (x < length) + { + 
CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_float32 a1 = vx_load(reinterpret_cast(&in1[x])); + v_float32 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 4])); + v_float32 a3 = vx_load(reinterpret_cast(&in1[x + 2 * nlanes / 4])); + v_float32 a4 = vx_load(reinterpret_cast(&in1[x + 3 * nlanes / 4])); + + v_float32 b1 = vx_load(reinterpret_cast(&in2[x])); + v_float32 b2 = vx_load(reinterpret_cast(&in2[x + nlanes / 4])); + v_float32 b3 = vx_load(reinterpret_cast(&in2[x + 2 * nlanes / 4])); + v_float32 b4 = vx_load(reinterpret_cast(&in2[x + 3 * nlanes / 4])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(v_pack(v_round(a1 + b1), v_round(a2 + b2)), + v_pack(v_round(a3 + b3), v_round(a4 + b4)))); + } + + if (x < length) + { + CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + + return 0; +} + +template +CV_ALWAYS_INLINE int sub_simd_sametype(const T in1[], const T in2[], T out[], int length) +{ + constexpr int nlanes = static_cast(VT::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + VT a = vx_load(&in1[x]); + VT b = vx_load(&in2[x]); + vx_store(&out[x], a - b); + } + + if (x < length && (in1 != out) && (in2 != out)) + { + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; +} + +template +CV_ALWAYS_INLINE int sub_simd(const SRC in1[], const SRC in2[], 
DST out[], int length) +{ + if (std::is_same::value && !std::is_same::value) + return 0; + + if (std::is_same::value) + { + if (std::is_same::value) + { + return sub_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return sub_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return sub_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_int16 a1 = vx_load(reinterpret_cast(&in1[x])); + v_int16 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 2])); + v_int16 b1 = vx_load(reinterpret_cast(&in2[x])); + v_int16 b2 = vx_load(reinterpret_cast(&in2[x + nlanes / 2])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(a1 - b1, a2 - b2)); + } + + if (x < length) + { + CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_float32 a1 = vx_load(reinterpret_cast(&in1[x])); + v_float32 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 4])); + v_float32 a3 = vx_load(reinterpret_cast(&in1[x + 2 * nlanes / 4])); + v_float32 a4 = vx_load(reinterpret_cast(&in1[x + 3 * nlanes / 4])); + + v_float32 b1 = vx_load(reinterpret_cast(&in2[x])); + v_float32 b2 = vx_load(reinterpret_cast(&in2[x + nlanes / 4])); + v_float32 b3 = 
vx_load(reinterpret_cast(&in2[x + 2 * nlanes / 4])); + v_float32 b4 = vx_load(reinterpret_cast(&in2[x + 3 * nlanes / 4])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(v_pack(v_round(a1 - b1), v_round(a2 - b2)), + v_pack(v_round(a3 - b3), v_round(a4 - b4)))); + } + + if (x < length) + { + CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + + return 0; +} +#endif + template static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm arithm, double scale=1) @@ -168,29 +510,37 @@ static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm a // NB: assume in/out types are not 64-bits float _scale = static_cast( scale ); + int x = 0; + switch (arithm) { - case ARITHM_ABSDIFF: - for (int l=0; l < length; l++) - out[l] = absdiff(in1[l], in2[l]); - break; - case ARITHM_ADD: - for (int l=0; l < length; l++) - out[l] = add(in1[l], in2[l]); - break; - case ARITHM_SUBTRACT: - for (int l=0; l < length; l++) - out[l] = sub(in1[l], in2[l]); - break; - case ARITHM_MULTIPLY: - for (int l=0; l < length; l++) - out[l] = mul(in1[l], in2[l], _scale); - break; - case ARITHM_DIVIDE: - for (int l=0; l < length; l++) - out[l] = div(in1[l], in2[l], _scale); - break; - default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation"); + case ARITHM_ADD: + { +#if CV_SIMD + x = add_simd(in1, in2, out, length); +#endif + for (; x < length; ++x) + out[x] = add(in1[x], in2[x]); + break; + } + case ARITHM_SUBTRACT: + { +#if CV_SIMD + x = sub_simd(in1, in2, out, length); +#endif + for (; x < length; ++x) + out[x] = sub(in1[x], in2[x]); + break; + } + case ARITHM_MULTIPLY: + for (; x < length; ++x) + out[x] = mul(in1[x], in2[x], _scale); + break; + case ARITHM_DIVIDE: + for (; x < length; ++x) + out[x] = div(in1[x], in2[x], _scale); + break; + default: 
CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation"); } } @@ -270,6 +620,29 @@ GAPI_FLUID_KERNEL(GFluidDiv, cv::gapi::core::GDiv, false) } }; +template +static void run_absdiff(Buffer &dst, const View &src1, const View &src2) +{ + static_assert(std::is_same::value, "wrong types"); + static_assert(std::is_same::value, "wrong types"); + + const auto *in1 = src1.InLine(0); + const auto *in2 = src2.InLine(0); + auto *out = dst.OutLine(); + + int width = dst.length(); + int chan = dst.meta().chan; + int length = width * chan; + + int x = 0; + +#if CV_SIMD + x = absdiff_simd(in1, in2, out, length); +#endif + for (; x < length; ++x) + out[x] = absdiff(in1[x], in2[x]); +} + GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false) { static const int Window = 1; @@ -277,10 +650,10 @@ GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false) static void run(const View &src1, const View &src2, Buffer &dst) { // DST SRC1 SRC2 OP __VA_ARGS__ - BINARY_(uchar , uchar , uchar , run_arithm, dst, src1, src2, ARITHM_ABSDIFF); - BINARY_(ushort, ushort, ushort, run_arithm, dst, src1, src2, ARITHM_ABSDIFF); - BINARY_( short, short, short, run_arithm, dst, src1, src2, ARITHM_ABSDIFF); - BINARY_( float, float, float, run_arithm, dst, src1, src2, ARITHM_ABSDIFF); + BINARY_(uchar , uchar , uchar , run_absdiff, dst, src1, src2); + BINARY_(ushort, ushort, ushort, run_absdiff, dst, src1, src2); + BINARY_( short, short, short, run_absdiff, dst, src1, src2); + BINARY_( float, float, float, run_absdiff, dst, src1, src2); CV_Error(cv::Error::StsBadArg, "unsupported combination of types"); } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/bindings_ie.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/bindings_ie.cpp new file mode 100644 index 00000000000..35191d7bcb5 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/bindings_ie.cpp @@ -0,0 +1,39 @@ +#include + 
+cv::gapi::ie::PyParams::PyParams(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device) + : m_priv(std::make_shared>(tag, model, weights, device)) { +} + +cv::gapi::ie::PyParams::PyParams(const std::string &tag, + const std::string &model, + const std::string &device) + : m_priv(std::make_shared>(tag, model, device)) { +} + +cv::gapi::GBackend cv::gapi::ie::PyParams::backend() const { + return m_priv->backend(); +} + +std::string cv::gapi::ie::PyParams::tag() const { + return m_priv->tag(); +} + +cv::util::any cv::gapi::ie::PyParams::params() const { + return m_priv->params(); +} + +cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device) { + return {tag, model, weights, device}; +} + +cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag, + const std::string &model, + const std::string &device) { + return {tag, model, device}; +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend.cpp index df4ffb1fe02..3cbe24364a1 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include "compiler/gobjref.hpp" #include "compiler/gmodel.hpp" @@ -45,6 +46,10 @@ #include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! 
+#if INF_ENGINE_RELEASE < 2021010000 +#include "ie_compound_blob.h" +#endif + namespace IE = InferenceEngine; namespace { @@ -151,6 +156,25 @@ inline IE::Blob::Ptr wrapIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) { return IE::Blob::Ptr{}; } +inline IE::Blob::Ptr wrapIE(const cv::MediaFrame::View& view, + const cv::GFrameDesc& desc) { + + switch (desc.fmt) { + case cv::MediaFormat::BGR: { + auto bgr = cv::Mat(desc.size, CV_8UC3, view.ptr[0], view.stride[0]); + return wrapIE(bgr, cv::gapi::ie::TraitAs::IMAGE); + } + case cv::MediaFormat::NV12: { + auto y_plane = cv::Mat(desc.size, CV_8UC1, view.ptr[0], view.stride[0]); + auto uv_plane = cv::Mat(desc.size / 2, CV_8UC2, view.ptr[1], view.stride[1]); + return cv::gapi::ie::util::to_ie(y_plane, uv_plane); + } + default: + GAPI_Assert(false && "Unsupported media format for IE backend"); + } + GAPI_Assert(false); +} + template inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) { switch (blob->getTensorDesc().getPrecision()) { @@ -175,11 +199,27 @@ struct IEUnit { IE::InputsDataMap inputs; IE::OutputsDataMap outputs; + IE::ExecutableNetwork this_network; + cv::gimpl::ie::wrap::Plugin this_plugin; + explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp) : params(pp) { - net = cv::gimpl::ie::wrap::readNetwork(params); - inputs = net.getInputsInfo(); - outputs = net.getOutputsInfo(); + if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) { + net = cv::gimpl::ie::wrap::readNetwork(params); + inputs = net.getInputsInfo(); + outputs = net.getOutputsInfo(); + } else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) { + this_plugin = cv::gimpl::ie::wrap::getPlugin(params); + this_plugin.SetConfig(params.config); + this_network = cv::gimpl::ie::wrap::importNetwork(this_plugin, params); + // FIXME: ICNNetwork returns InputsDataMap/OutputsDataMap, + // but ExecutableNetwork returns ConstInputsDataMap/ConstOutputsDataMap + inputs = 
cv::gimpl::ie::wrap::toInputsDataMap(this_network.GetInputsInfo()); + outputs = cv::gimpl::ie::wrap::toOutputsDataMap(this_network.GetOutputsInfo()); + } else { + cv::util::throw_error(std::logic_error("Unsupported ParamDesc::Kind")); + } + // The practice shows that not all inputs and not all outputs // are mandatory to specify in IE model. // So what we're concerned here about is: @@ -205,10 +245,16 @@ struct IEUnit { // This method is [supposed to be] called at Island compilation stage cv::gimpl::ie::IECompiled compile() const { - auto plugin = cv::gimpl::ie::wrap::getPlugin(params); - auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params); - auto this_request = this_network.CreateInferRequest(); + IEUnit* non_const_this = const_cast(this); + if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) { + // FIXME: In case importNetwork for fill inputs/outputs need to obtain ExecutableNetwork, but + // for loadNetwork they can be obtained by using readNetwork + non_const_this->this_plugin = cv::gimpl::ie::wrap::getPlugin(params); + non_const_this->this_plugin.SetConfig(params.config); + non_const_this->this_network = cv::gimpl::ie::wrap::loadNetwork(non_const_this->this_plugin, net, params); + } + auto this_request = non_const_this->this_network.CreateInferRequest(); // Bind const data to infer request for (auto &&p : params.const_inputs) { // FIXME: SetBlob is known to be inefficient, @@ -217,7 +263,16 @@ struct IEUnit { // Still, constant data is to set only once. this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second)); } - return {plugin, this_network, this_request}; + // Bind const data to infer request + for (auto &&p : params.const_inputs) { + // FIXME: SetBlob is known to be inefficient, + // it is worth to make a customizable "initializer" and pass the + // cv::Mat-wrapped blob there to support IE's optimal "GetBlob idiom" + // Still, constant data is to set only once. 
+ this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second)); + } + + return {this_plugin, this_network, this_request}; } }; @@ -225,6 +280,7 @@ struct IECallContext { // Input parameters passed to an inference operation. std::vector args; + cv::GShapes in_shapes; //FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call //to OCV kernel. (This can be achieved by a two single time conversions in GCPUExecutable::run, @@ -236,6 +292,10 @@ struct IECallContext template const T& inArg(std::size_t input) { return args.at(input).get(); } + const cv::MediaFrame& inFrame(std::size_t input) { + return inArg(input); + } + // Syntax sugar const cv::Mat& inMat(std::size_t input) { return inArg(input); @@ -288,6 +348,24 @@ using GConstGIEModel = ade::ConstTypedGraph , IEUnit , IECallable >; + +using Views = std::vector>; + +inline IE::Blob::Ptr extractBlob(IECallContext& ctx, std::size_t i, Views& views) { + switch (ctx.in_shapes[i]) { + case cv::GShape::GFRAME: { + const auto& frame = ctx.inFrame(i); + views.emplace_back(new cv::MediaFrame::View(frame.access(cv::MediaFrame::Access::R))); + return wrapIE(*views.back(), frame.desc()); + } + case cv::GShape::GMAT: { + return wrapIE(ctx.inMat(i), cv::gapi::ie::TraitAs::IMAGE); + } + default: + GAPI_Assert("Unsupported input shape for IE backend"); + } + GAPI_Assert(false); +} } // anonymous namespace // GCPUExcecutable implementation ////////////////////////////////////////////// @@ -353,6 +431,8 @@ cv::GArg cv::gimpl::ie::GIEExecutable::packArg(const cv::GArg &arg) { // (and constructed by either bindIn/Out or resetInternal) case GShape::GOPAQUE: return GArg(m_res.slot().at(ref.id)); + case GShape::GFRAME: return GArg(m_res.slot().at(ref.id)); + default: util::throw_error(std::logic_error("Unsupported GShape type")); break; @@ -382,6 +462,12 @@ void cv::gimpl::ie::GIEExecutable::run(std::vector &&input_objs, std::back_inserter(context.args), std::bind(&GIEExecutable::packArg, this, 
_1)); + // NB: Need to store inputs shape to recognize GFrame/GMat + ade::util::transform(op.args, + std::back_inserter(context.in_shapes), + [](const cv::GArg& arg) { + return arg.get().shape; + }); // - Output parameters. for (const auto &out_it : ade::util::indexed(op.outs)) { // FIXME: Can the same GArg type resolution mechanism be reused here? @@ -397,12 +483,44 @@ void cv::gimpl::ie::GIEExecutable::run(std::vector &&input_objs, kk.run(this_iec, uu, context); for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second); + + // In/Out args clean-up is mandatory now with RMat + for (auto &it : input_objs) magazine::unbind(m_res, it.first); + for (auto &it : output_objs) magazine::unbind(m_res, it.first); } namespace cv { namespace gimpl { namespace ie { +static void configureInputInfo(const IE::InputInfo::Ptr& ii, const cv::GMetaArg mm) { + switch (mm.index()) { + case cv::GMetaArg::index_of(): + { + ii->setPrecision(toIE(util::get(mm).depth)); + break; + } + case cv::GMetaArg::index_of(): + { + const auto &meta = util::get(mm); + switch (meta.fmt) { + case cv::MediaFormat::NV12: + ii->getPreProcess().setColorFormat(IE::ColorFormat::NV12); + break; + case cv::MediaFormat::BGR: + // NB: Do nothing + break; + default: + GAPI_Assert(false && "Unsupported media format for IE backend"); + } + ii->setPrecision(toIE(CV_8U)); + break; + } + default: + util::throw_error(std::runtime_error("Unsupported input meta for IE backend")); + } +} + struct Infer: public cv::detail::KernelTag { using API = cv::GInferBase; static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); } @@ -433,11 +551,7 @@ struct Infer: public cv::detail::KernelTag { auto &&ii = uu.inputs.at(std::get<0>(it)); const auto & mm = std::get<1>(it); - GAPI_Assert(util::holds_alternative(mm) - && "Non-GMat inputs are not supported"); - - const auto &meta = util::get(mm); - ii->setPrecision(toIE(meta.depth)); + configureInputInfo(ii, mm); 
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); } @@ -460,15 +574,12 @@ struct Infer: public cv::detail::KernelTag { static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) { // non-generic version for now: // - assumes all inputs/outputs are always Mats + Views views; for (auto i : ade::util::iota(uu.params.num_in)) { // TODO: Ideally we shouldn't do SetBlob() but GetBlob() instead, // and redirect our data producers to this memory // (A memory dialog comes to the picture again) - - const cv::Mat this_mat = ctx.inMat(i); - // FIXME: By default here we trait our inputs as images. - // May be we need to make some more intelligence here about it - IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE); + IE::Blob::Ptr this_blob = extractBlob(ctx, i, views); iec.this_request.SetBlob(uu.params.input_names[i], this_blob); } iec.this_request.Infer(); @@ -486,6 +597,67 @@ struct Infer: public cv::detail::KernelTag { } }; +struct InferROI: public cv::detail::KernelTag { + using API = cv::GInferROIBase; + static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + cv::GMetaArgs result; + + GConstGIEModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + // Initialize input information + // FIXME: So far it is pretty limited + GAPI_Assert(1u == uu.params.input_names.size()); + GAPI_Assert(2u == in_metas.size()); + + // 0th is ROI, 1st is input image + auto &&ii = uu.inputs.at(uu.params.input_names.at(0)); + auto &&mm = in_metas.at(1u); + configureInputInfo(ii, mm); + ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); + + // FIXME: It would be nice here to have an exact number of network's + // input/output parameters. Probably GCall should store it here for us. + // It doesn't, as far as I know.. 
+ for (const auto &out_name : uu.params.output_names) { + // NOTE: our output_names vector follows the API order + // of this operation's outputs + const IE::DataPtr& ie_out = uu.outputs.at(out_name); + const IE::SizeVector dims = ie_out->getTensorDesc().getDims(); + + cv::GMatDesc outm(toCV(ie_out->getPrecision()), + toCV(ie_out->getTensorDesc().getDims())); + result.emplace_back(outm); + } + return result; + } + + static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) { + // non-generic version for now, per the InferROI's definition + GAPI_Assert(uu.params.num_in == 1); + const auto& this_roi = ctx.inArg(0).rref(); + + Views views; + IE::Blob::Ptr this_blob = extractBlob(ctx, 1, views); + + iec.this_request.SetBlob(*uu.params.input_names.begin(), + IE::make_shared_blob(this_blob, toIE(this_roi))); + iec.this_request.Infer(); + for (auto i : ade::util::iota(uu.params.num_out)) { + cv::Mat& out_mat = ctx.outMatR(i); + IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]); + copyFromIE(out_blob, out_mat); + } + } +}; + + struct InferList: public cv::detail::KernelTag { using API = cv::GInferListBase; static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); } @@ -512,12 +684,7 @@ struct InferList: public cv::detail::KernelTag { for (auto &&input_name : uu.params.input_names) { auto &&ii = uu.inputs.at(input_name); const auto & mm = in_metas[idx++]; - - GAPI_Assert(util::holds_alternative(mm) - && "Non-GMat inputs are not supported"); - - const auto &meta = util::get(mm); - ii->setPrecision(toIE(meta.depth)); + configureInputInfo(ii, mm); ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); } @@ -536,9 +703,9 @@ struct InferList: public cv::detail::KernelTag { GAPI_Assert(uu.params.num_in == 1); // roi list is not counted in net's inputs const auto& in_roi_vec = ctx.inArg(0u).rref(); - const cv::Mat this_mat = ctx.inMat(1u); - // Since we do a ROI list inference, always assume our input buffer is image - 
IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE); + + Views views; + IE::Blob::Ptr this_blob = extractBlob(ctx, 1, views); // FIXME: This could be done ONCE at graph compile stage! std::vector< std::vector > cached_dims(uu.params.num_out); @@ -602,11 +769,30 @@ struct InferList2: public cv::detail::KernelTag { // "blob"-based ones) // FIXME: this is filtering not done, actually! GArrayDesc has // no hint for its underlying type! - const auto &mm_0 = in_metas[0u]; - const auto &meta_0 = util::get(mm_0); - GAPI_Assert( !meta_0.isND() + const auto &mm_0 = in_metas[0u]; + switch (in_metas[0u].index()) { + case cv::GMetaArg::index_of(): { + const auto &meta_0 = util::get(mm_0); + GAPI_Assert( !meta_0.isND() + && !meta_0.planar + && "Only images are supported as the 0th argument"); + break; + } + case cv::GMetaArg::index_of(): { + // FIXME: Is there any validation for GFrame ? + break; + } + default: + util::throw_error(std::runtime_error("Unsupported input meta for IE backend")); + } + + if (util::holds_alternative(mm_0)) { + const auto &meta_0 = util::get(mm_0); + GAPI_Assert( !meta_0.isND() && !meta_0.planar && "Only images are supported as the 0th argument"); + } + std::size_t idx = 1u; for (auto &&input_name : uu.params.input_names) { auto &ii = uu.inputs.at(input_name); @@ -616,7 +802,7 @@ struct InferList2: public cv::detail::KernelTag { if (op.k.inKinds[idx] == cv::detail::OpaqueKind::CV_RECT) { // This is a cv::Rect -- configure the IE preprocessing - ii->setPrecision(toIE(meta_0.depth)); + configureInputInfo(ii, mm_0); ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); } else { // This is a cv::GMat (equals to: cv::Mat) @@ -639,9 +825,8 @@ struct InferList2: public cv::detail::KernelTag { GAPI_Assert(ctx.args.size() > 1u && "This operation must have at least two arguments"); - // Since we do a ROI list inference, always assume our input buffer is image - const cv::Mat mat_0 = ctx.inMat(0u); - IE::Blob::Ptr blob_0 = 
wrapIE(mat_0, cv::gapi::ie::TraitAs::IMAGE); + Views views; + IE::Blob::Ptr blob_0 = extractBlob(ctx, 0, views); // Take the next argument, which must be vector (of any kind). // Use it only to obtain the ROI list size (sizes of all other @@ -717,9 +902,23 @@ namespace { // FIXME: Introduce a DNNBackend interface which'd specify // the framework for this??? GIEModel gm(gr); - const auto &np = gm.metadata(nh).get(); - const auto &pp = cv::util::any_cast(np.opaque); + auto &np = gm.metadata(nh).get(); + auto &pp = cv::util::any_cast(np.opaque); const auto &ki = cv::util::any_cast(ii.opaque); + + GModel::Graph model(gr); + auto& op = model.metadata(nh).get(); + + // NB: In case generic infer, info about in/out names is stored in operation (op.params) + if (pp.is_generic) + { + auto& info = cv::util::any_cast(op.params); + pp.input_names = info.in_names; + pp.output_names = info.out_names; + pp.num_in = info.in_names.size(); + pp.num_out = info.out_names.size(); + } + gm.metadata(nh).set(IEUnit{pp}); gm.metadata(nh).set(IECallable{ki.run}); gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc}); @@ -733,6 +932,7 @@ namespace { virtual cv::gapi::GKernelPackage auxiliaryKernels() const override { return cv::gapi::kernels< cv::gimpl::ie::Infer + , cv::gimpl::ie::InferROI , cv::gimpl::ie::InferList , cv::gimpl::ie::InferList2 >(); @@ -760,6 +960,16 @@ IE::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &blob) { return wrapIE(blob, cv::gapi::ie::TraitAs::IMAGE); } +IE::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &y_plane, cv::Mat &uv_plane) { + auto y_blob = wrapIE(y_plane, cv::gapi::ie::TraitAs::IMAGE); + auto uv_blob = wrapIE(uv_plane, cv::gapi::ie::TraitAs::IMAGE); +#if INF_ENGINE_RELEASE >= 2021010000 + return IE::make_shared_blob(y_blob, uv_blob); +#else + return IE::make_shared_blob(y_blob, uv_blob); +#endif +} + #else // HAVE_INF_ENGINE cv::gapi::GBackend cv::gapi::ie::backend() { diff --git 
a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp index 444d9553e79..ba0632d4f0f 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp @@ -22,6 +22,24 @@ namespace IE = InferenceEngine; namespace giewrap = cv::gimpl::ie::wrap; using GIEParam = cv::gapi::ie::detail::ParamDesc; +IE::InputsDataMap giewrap::toInputsDataMap (const IE::ConstInputsDataMap& inputs) { + IE::InputsDataMap transformed; + auto convert = [](const std::pair& p) { + return std::make_pair(p.first, std::const_pointer_cast(p.second)); + }; + std::transform(inputs.begin(), inputs.end(), std::inserter(transformed, transformed.end()), convert); + return transformed; +} + +IE::OutputsDataMap giewrap::toOutputsDataMap (const IE::ConstOutputsDataMap& outputs) { + IE::OutputsDataMap transformed; + auto convert = [](const std::pair& p) { + return std::make_pair(p.first, std::const_pointer_cast(p.second)); + }; + std::transform(outputs.begin(), outputs.end(), std::inserter(transformed, transformed.end()), convert); + return transformed; +} + #if INF_ENGINE_RELEASE < 2020000000 // < 2020.1 // Load extensions (taken from DNN module) std::vector giewrap::getExtensions(const GIEParam& params) { diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp index 7871942d263..3927c802b71 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp @@ -28,7 +28,11 @@ namespace wrap { GAPI_EXPORTS std::vector getExtensions(const GIEParam& params); GAPI_EXPORTS IE::CNNNetwork 
readNetwork(const GIEParam& params); +IE::InputsDataMap toInputsDataMap (const IE::ConstInputsDataMap& inputs); +IE::OutputsDataMap toOutputsDataMap(const IE::ConstOutputsDataMap& outputs); + #if INF_ENGINE_RELEASE < 2019020000 // < 2019.R2 +using Plugin = IE::InferencePlugin; GAPI_EXPORTS IE::InferencePlugin getPlugin(const GIEParam& params); GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::InferencePlugin& plugin, const IE::CNNNetwork& net, @@ -36,7 +40,12 @@ GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::InferencePlugin& return plugin.LoadNetwork(net, {}); // FIXME: 2nd parameter to be // configurable via the API } +GAPI_EXPORTS inline IE::ExecutableNetwork importNetwork( IE::CNNNetwork& plugin, + const GIEParam& param) { + return plugin.ImportNetwork(param.model_path, param.device_id, {}); +} #else // >= 2019.R2 +using Plugin = IE::Core; GAPI_EXPORTS IE::Core getCore(); GAPI_EXPORTS IE::Core getPlugin(const GIEParam& params); GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::Core& core, @@ -44,6 +53,10 @@ GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::Core& core const GIEParam& params) { return core.LoadNetwork(net, params.device_id); } +GAPI_EXPORTS inline IE::ExecutableNetwork importNetwork( IE::Core& core, + const GIEParam& param) { + return core.ImportNetwork(param.model_path, param.device_id, {}); +} #endif // INF_ENGINE_RELEASE < 2019020000 }}}} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/util.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/util.hpp index b16ccbe0ce3..080c88498fe 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/util.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/util.hpp @@ -28,6 +28,7 @@ namespace util { GAPI_EXPORTS std::vector to_ocv(const InferenceEngine::SizeVector &dims); GAPI_EXPORTS cv::Mat to_ocv(InferenceEngine::Blob::Ptr blob); GAPI_EXPORTS InferenceEngine::Blob::Ptr to_ie(cv::Mat 
&blob); +GAPI_EXPORTS InferenceEngine::Blob::Ptr to_ie(cv::Mat &y_plane, cv::Mat &uv_plane); }}}} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclbackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclbackend.cpp index 8705deb7c2c..847b802fd29 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclbackend.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclbackend.cpp @@ -165,12 +165,31 @@ void cv::gimpl::GOCLExecutable::run(std::vector &&input_objs, // NB: avoid clearing the whole magazine, there's also pre-allocated internal data for (auto& it : input_objs) umats.erase(it.first.id); for (auto& it : output_objs) umats.erase(it.first.id); + + // In/Out args clean-up is mandatory now with RMat + for (auto &it : input_objs) magazine::unbind(*p, it.first); + for (auto &it : output_objs) magazine::unbind(*p, it.first); }; // RAII wrapper to clean-up m_res std::unique_ptr cleaner(&m_res, clean_up); - for (auto& it : input_objs) magazine::bindInArg (m_res, it.first, it.second, true); - for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second, true); + const auto bindUMat = [this](const RcDesc& rc) { + auto& mag_umat = m_res.template slot()[rc.id]; + mag_umat = m_res.template slot()[rc.id].getUMat(ACCESS_READ); + }; + + for (auto& it : input_objs) { + const auto& rc = it.first; + magazine::bindInArg (m_res, rc, it.second); + // There is already cv::Mat in the magazine after bindInArg call, + // extract UMat from it, put into the magazine + if (rc.shape == GShape::GMAT) bindUMat(rc); + } + for (auto& it : output_objs) { + const auto& rc = it.first; + magazine::bindOutArg(m_res, rc, it.second); + if (rc.shape == GShape::GMAT) bindUMat(rc); + } // Initialize (reset) internal data nodes with user structures // before processing a frame (no need to do it for external data structures) @@ -241,5 +260,20 @@ void 
cv::gimpl::GOCLExecutable::run(std::vector &&input_objs, } } // for(m_script) - for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second, true); + for (auto &it : output_objs) + { + auto& rc = it.first; + auto& g_arg = it.second; + magazine::writeBack(m_res, rc, g_arg); + if (rc.shape == GShape::GMAT) + { + uchar* out_arg_data = m_res.template slot()[rc.id].data; + auto& mag_mat = m_res.template slot().at(rc.id); + GAPI_Assert((out_arg_data == (mag_mat.getMat(ACCESS_RW).data)) && " data for output parameters was reallocated ?"); + } + } + + // In/Out args clean-up is mandatory now with RMat + for (auto &it : input_objs) magazine::unbind(m_res, it.first); + for (auto &it : output_objs) magazine::unbind(m_res, it.first); } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclcore.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclcore.cpp index ec6ab18f840..61e03340fb9 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclcore.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclcore.cpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #include "precomp.hpp" @@ -337,6 +337,14 @@ GAPI_OCL_KERNEL(GOCLSum, cv::gapi::core::GSum) } }; +GAPI_OCL_KERNEL(GOCLCountNonZero, cv::gapi::core::GCountNonZero) +{ + static void run(const cv::UMat& in, int& out) + { + out = cv::countNonZero(in); + } +}; + GAPI_OCL_KERNEL(GOCLAddW, cv::gapi::core::GAddW) { static void run(const cv::UMat& in1, double alpha, const cv::UMat& in2, double beta, double gamma, int dtype, cv::UMat& out) @@ -565,6 +573,7 @@ cv::gapi::GKernelPackage cv::gapi::core::ocl::kernels() , GOCLAbsDiff , GOCLAbsDiffC , GOCLSum + , GOCLCountNonZero , GOCLAddW , GOCLNormL1 , GOCLNormL2 diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclkernel.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclkernel.cpp index 585e0174c80..9ea8502d265 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclkernel.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/ocl/goclkernel.cpp @@ -34,6 +34,11 @@ cv::detail::VectorRef& cv::GOCLContext::outVecRef(int output) return util::get(m_results.at(output)); } +cv::detail::OpaqueRef& cv::GOCLContext::outOpaqueRef(int output) +{ + return util::get(m_results.at(output)); +} + cv::GOCLKernel::GOCLKernel() { } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/onnx/gonnxbackend.cpp new file mode 100644 index 00000000000..7ab386ecabd --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/onnx/gonnxbackend.cpp @@ -0,0 +1,963 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" +#include "backends/onnx/gonnxbackend.hpp" + +#ifdef HAVE_ONNX + +#include // any_of +#include +#include +#include + +#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! + +namespace cv { +namespace gimpl { +namespace onnx { + +enum TensorPosition : int { + INPUT, + OUTPUT +}; + +struct TensorInfo { + TensorInfo() = default; + explicit TensorInfo(const Ort::TensorTypeAndShapeInfo& info) + : dims(info.GetShape()) + , type(info.GetElementType()) + , is_dynamic(std::find(dims.begin(), dims.end(), -1) != dims.end()) { + if (!is_dynamic) { + size = std::accumulate(dims.begin(), + dims.end(), + static_cast(1), + std::multiplies()); + } + // Heuristic: check if the tensor is grayscale input + if (dims.size() == 4u + && dims[0] == 1 + && dims[1] == 1 + && dims[2] > 1 + && dims[3] > 1) { + is_grayscale = true; + } + } + + std::string name; + std::vector dims; + ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED; + int64_t size = -1; + + bool normalize = true; + + bool is_dynamic = false; + bool is_grayscale = false; + + struct MeanStdev { + cv::Scalar mean; + cv::Scalar stdev; + }; + cv::util::optional mstd; +}; + +class ONNXCompiled { + // ONNX Resources + // NOTE: Env must live with the session, otherwise segfaults. 
+ Ort::Env this_env{nullptr}; + Ort::Session this_session{nullptr}; + Ort::MemoryInfo this_memory_info{nullptr}; + + std::vector in_tensor_info; + std::vector out_tensor_info; + bool is_dynamic = false; + + // G-API description + gapi::onnx::detail::ParamDesc params; + + // Input/output tensor information + std::vector getTensorInfo(TensorPosition pos); + + // Run-time data structures + std::vector in_data; + std::vector out_data; + + void Run(const std::vector& ins, + const std::vector& outs); + +public: + explicit ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp); + + // Extract the information about output layer #i + cv::GMatDesc outMeta(int i) const; + + // Assign input/output info + std::size_t numInputs() const { return params.num_in; } + std::size_t numOutputs() const { return params.num_out; } + void setInput(int i, const cv::Mat &m); + void setOutput(int i, cv::Mat &m); + cv::Mat allocOutput(int i) const; + + // Run with the assigned inputs/outputs + void run(); +}; + +} // namespace onnx +} // namespace gimpl +} // namespace cv + +namespace { + +inline std::vector getCharNames(const std::vector& names) { + std::vector out_vec; + for (const auto& el : names) { + out_vec.push_back(el.data()); + } + return out_vec; +} + +inline int getIdxByName(const std::vector& info, const std::string& name) { + // FIXME: Cache the ordering + const auto it = std::find_if(info.begin(), info.end(), [&](const cv::gimpl::onnx::TensorInfo &i) { + return i.name == name; + }); + GAPI_Assert(it != info.end()); + return std::distance(info.begin(), it); +} + +inline int toCV(ONNXTensorElementDataType prec) { + switch (prec) { + case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U; + case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F; + default: GAPI_Assert(false && "Unsupported data type"); + } + return -1; +} + +inline std::vector toCV(const std::vector &vsz) { + std::vector result; + result.reserve(vsz.size()); + for (auto sz : vsz) { + 
result.push_back(ade::util::checked_cast(sz)); + } + return result; +} + +inline cv::Mat toCV(Ort::Value &v) { + auto info = v.GetTensorTypeAndShapeInfo(); + return cv::Mat(toCV(info.GetShape()), + toCV(info.GetElementType()), + reinterpret_cast(v.GetTensorMutableData())); +} + +inline std::vector toORT(const cv::MatSize &sz) { + return cv::to_own(sz); +} + +inline void preprocess(const cv::Mat& src, + const cv::gimpl::onnx::TensorInfo& ti, + cv::Mat& dst) { + GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U); + + if (src.depth() == CV_32F) { + // Just pass the tensor as-is. + // No layout or dimension transformations done here! + // TODO: This needs to be aligned across all NN backends. + GAPI_Assert(toCV(ti.type) == CV_32F && "Only 32F model input is supported for 32F data"); + const auto tensor_dims = toORT(src.size); + if (tensor_dims.size() == ti.dims.size()) { + for (size_t i = 0; i < ti.dims.size(); ++i) { + GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) && + "32F tensor dimensions should match with all non-dynamic NN input dimensions"); + } + } else { + GAPI_Assert(false && "32F tensor size should match with NN input"); + } + + dst = src; + } else { + // 8U input: full preprocessing path + GAPI_Assert(src.depth() == CV_8U && "Only 8U data type is supported for preproc"); + GAPI_Assert(ti.dims.size() == 4u && "Only NCHW/NHWC layouts are supported for preproc"); + + const auto ddepth = toCV(ti.type); + GAPI_Assert((ddepth == CV_8U || ddepth == CV_32F) + && "Only 8U and 32F model input is supported for 8U data"); + + // Assess the expected input layout + const bool is_hwc = [&](int ch) { + if (ti.is_grayscale) return false; // 1,1,h,w + else if (ti.dims[3] == ch) return true; // _,_,_,c + else if (ti.dims[1] == ch) return false; // _,c,_,_ + else cv::util::throw_error(std::logic_error("Couldn't identify input tensor layout")); + } (src.channels()); + + int new_c = src.channels(); + cv::Mat csc; + if (ti.is_grayscale && new_c == 3) { + 
cv::cvtColor(src, csc, cv::COLOR_BGR2GRAY); + new_c = 1; + } else { + csc = src; + } + + // NHWC vs NCHW + int new_h = -1, new_w = -1; + if (ti.is_dynamic) { + // reuse h & w from the input image + new_h = src.rows; + new_w = src.cols; + } else { + // take h & w from the ONNX tensor info + new_h = ti.dims[is_hwc ? 1 : 2]; + new_w = ti.dims[is_hwc ? 2 : 3]; + } + GAPI_Assert(new_h != -1 && new_w != -1); + + cv::Mat rsz, pp; + cv::resize(csc, rsz, cv::Size(new_w, new_h)); + if (src.depth() == CV_8U && ddepth == CV_32F) { + rsz.convertTo(pp, ddepth, ti.normalize ? 1.f / 255 : 1.f); + if (ti.mstd.has_value()) { + pp -= ti.mstd->mean; + pp /= ti.mstd->stdev; + } + } else { + pp = rsz; + } + + if (!is_hwc && new_c > 1) { + // Convert to CHW + dst.create(cv::Size(new_w, new_h * new_c), ddepth); + std::vector planes(new_c); + for (int ch = 0; ch < new_c; ++ch) { + planes[ch] = dst.rowRange(ch * new_h, (ch + 1) * new_h); + } + cv::split(pp, planes); + } else { + // Keep HWC + dst = pp; + } + + // Ensure dst is a tensor shape (not a 2D image) + if (ti.is_dynamic) { + // Reshape to input dimensions + const std::vector out_dims = is_hwc + ? std::vector{1, new_h, new_w, new_c} + : std::vector{1, new_c, new_h, new_w}; + dst = dst.reshape(1, out_dims); + } else { + // Reshape to ONNX dimensions (no -1s there!) 
+ dst = dst.reshape(1, toCV(ti.dims)); + } + } +} + +template +inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info, + const cv::gimpl::onnx::TensorInfo& tensor_params, + const cv::Mat& data) { + (void) tensor_params; + auto ort_dims = toORT(data.size); + return Ort::Value::CreateTensor(memory_info, + const_cast(data.ptr()), + data.total(), + ort_dims.data(), + ort_dims.size()); +} + +inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info, + const cv::gimpl::onnx::TensorInfo& tensor_params, + const cv::Mat& data) { + GAPI_Assert(data.isContinuous ()); + switch (tensor_params.type) { + case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: + return createTensor(memory_info, tensor_params, data); + case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: + return createTensor(memory_info, tensor_params, data); + default: + GAPI_Assert(false && "Unsupported data type"); + } + return Ort::Value{nullptr}; +} + +struct ONNXUnit { + static const char *name() { return "ONNXModelConfig"; } + + std::shared_ptr oc; + + explicit ONNXUnit(const cv::gapi::onnx::detail::ParamDesc &pp) + : oc(new cv::gimpl::onnx::ONNXCompiled(pp)) { + } +}; + +struct ONNXCallContext { + // Input parameters passed to an inference operation. + std::vector args; + + //FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call + //to OCV kernel. 
(This can be achieved by a two single time conversions in GCPUExecutable::run, + //once on enter for input and output arguments, and once before return for output arguments only + //FIXME: check if the above applies to this backend (taken from CPU) + std::unordered_map results; + + // Generic accessor API + template + const T& inArg(std::size_t input) { return args.at(input).get(); } + + // Syntax sugar + const cv::Mat& inMat(std::size_t input) { + return inArg(input); + } + cv::Mat& outMatR(std::size_t output) { + return *cv::util::get(results.at(output)); + } + + template std::vector& outVecR(std::size_t output) { // FIXME: the same issue + return outVecRef(output).wref(); + } + cv::detail::VectorRef& outVecRef(std::size_t output) { + return cv::util::get(results.at(output)); + } +}; + +struct ONNXCallable { + static const char *name() { return "ONNXRequestCallable"; } + using Run = std::function; + Run run; +}; + +struct KImpl { + cv::gimpl::CustomMetaFunction::CM customMetaFunc; + ONNXCallable::Run run; +}; + +// FIXME: Is there a way to take a typed graph (our GModel), +// and create a new typed graph _ATOP_ of that (by extending with a couple of +// new types?). +// Alternatively, is there a way to compose types graphs? +// +// If not, we need to introduce that! 
+using GONNXModel = ade::TypedGraph + < cv::gimpl::Protocol + , cv::gimpl::Op + , cv::gimpl::NetworkParams + , cv::gimpl::CustomMetaFunction + , ONNXUnit + , ONNXCallable + >; + +// FIXME: Same issue with Typed and ConstTyped +using GConstGONNXModel = ade::ConstTypedGraph + < cv::gimpl::Protocol + , cv::gimpl::Op + , cv::gimpl::NetworkParams + , cv::gimpl::CustomMetaFunction + , ONNXUnit + , ONNXCallable + >; +} // anonymous namespace + +// GCPUExcecutable implementation ////////////////////////////////////////////// +cv::gimpl::onnx::GONNXExecutable::GONNXExecutable(const ade::Graph &g, + const std::vector &nodes) + : m_g(g), m_gm(m_g) { + // FIXME: Currently this backend is capable to run a single inference node only. + // Need to extend our island fusion with merge/not-to-merge decision making parametrization + GConstGONNXModel iem(g); + + for (auto &nh : nodes) { + switch (m_gm.metadata(nh).get().t) { + case NodeType::OP: + if (this_nh == nullptr) { + this_nh = nh; + } + else { + util::throw_error(std::logic_error("Multi-node inference is not supported!")); + } + break; + + case NodeType::DATA: { + m_dataNodes.push_back(nh); + const auto &desc = m_gm.metadata(nh).get(); + if (desc.storage == Data::Storage::CONST_VAL) { + util::throw_error(std::logic_error("No const data supported in backend!")); + } + if (desc.storage == Data::Storage::INTERNAL) { + util::throw_error(std::logic_error("No internal data supported in backend!")); + } + break; + } + default: util::throw_error(std::logic_error("Unsupported NodeType")); + } + } +} + +// FIXME: Document what it does +cv::GArg cv::gimpl::onnx::GONNXExecutable::packArg(const cv::GArg &arg) { + // No API placeholders allowed at this point + // FIXME: this check has to be done somewhere in compilation stage. 
+ GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT + && arg.kind != cv::detail::ArgKind::GSCALAR + && arg.kind != cv::detail::ArgKind::GARRAY + && arg.kind != cv::detail::ArgKind::GOPAQUE); + + if (arg.kind != cv::detail::ArgKind::GOBJREF) { + util::throw_error(std::logic_error("Inference supports G-types ONLY!")); + } + GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF); + + // Wrap associated CPU object (either host or an internal one) + // FIXME: object can be moved out!!! GExecutor faced that. + const cv::gimpl::RcDesc &ref = arg.get(); + switch (ref.shape) + { + case GShape::GMAT: return GArg(m_res.slot()[ref.id]); + + // Note: .at() is intentional for GArray as object MUST be already there + // (and constructed by either bindIn/Out or resetInternal) + case GShape::GARRAY: return GArg(m_res.slot().at(ref.id)); + + // Note: .at() is intentional for GOpaque as object MUST be already there + // (and constructed by either bindIn/Out or resetInternal) + case GShape::GOPAQUE: return GArg(m_res.slot().at(ref.id)); + + default: + util::throw_error(std::logic_error("Unsupported GShape type")); + break; + } +} + +void cv::gimpl::onnx::GONNXExecutable::run(std::vector &&input_objs, + std::vector &&output_objs) { + // Update resources with run-time information - what this Island + // has received from user (or from another Island, or mix...) + // FIXME: Check input/output objects against GIsland protocol + + for (auto& it : input_objs) magazine::bindInArg (m_res, it.first, it.second); + for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second); + + // FIXME: Running just a single node now. + // Not sure if need to support many of them, though + // FIXME: Make this island-unmergeable? 
+ const auto &op = m_gm.metadata(this_nh).get(); + + // Initialize kernel's execution context: + // - Input parameters + ONNXCallContext context; + context.args.reserve(op.args.size()); + using namespace std::placeholders; + ade::util::transform(op.args, + std::back_inserter(context.args), + std::bind(&GONNXExecutable::packArg, this, _1)); + + // - Output parameters. + for (const auto &out_it : ade::util::indexed(op.outs)) { + // FIXME: Can the same GArg type resolution mechanism be reused here? + const auto out_port = ade::util::index(out_it); + const auto out_desc = ade::util::value(out_it); + context.results[out_port] = magazine::getObjPtr(m_res, out_desc); + } + + // And now trigger the execution + GConstGONNXModel giem(m_g); + const auto &uu = giem.metadata(this_nh).get(); + const auto &kk = giem.metadata(this_nh).get(); + kk.run(uu, context); + + for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second); +} + +namespace cv { +namespace gimpl { +namespace onnx { + +ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp) + : params(pp) { + + // Validate input parameters before allocating any resources + if (params.num_in > 1u && params.num_in != params.input_names.size()) { + cv::util::throw_error(std::logic_error("Please specify input layer names for " + + params.model_path)); + } + if (params.num_out > 1u && params.num_out != params.output_names.size()) { + cv::util::throw_error(std::logic_error("Please specify output layer names for " + + params.model_path)); + } + + // Create and initialize the ONNX session + Ort::SessionOptions session_options; + this_env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, ""); + this_session = Ort::Session(this_env, params.model_path.data(), session_options); + this_memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); + + in_tensor_info = getTensorInfo(INPUT); + out_tensor_info = getTensorInfo(OUTPUT); + + const auto is_dyn = [](const TensorInfo &ti) { + return ti.is_dynamic; + 
}; + is_dynamic = ade::util::any_of(in_tensor_info, is_dyn) + || ade::util::any_of(out_tensor_info, is_dyn); + if (is_dynamic && !params.custom_post_proc) { + util::throw_error(std::logic_error("This network has dynamic shapes. " + "Please provide a custom post-processing function " + "(.cfgPostProc) in network parameters")); + } + + // Update parameters based on session information + if (params.num_in == 1u && params.input_names.empty()) { + params.input_names = { in_tensor_info.front().name }; + } + if (params.num_out == 1u && params.output_names.empty()) { + params.output_names = { out_tensor_info.front().name }; + } + + // Validate what is supported currently + GAPI_Assert(params.const_inputs.empty() + && "Const inputs are not currently supported"); + GAPI_Assert(std::all_of(in_tensor_info.begin(), + in_tensor_info.end(), + [](const cv::gimpl::onnx::TensorInfo &p) { + return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT + || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8; + }) + && "Only FP32 and U8 inputs for NN are supported"); + + // Put mean and std in appropriate tensor params + if (!params.mean.empty() || !params.stdev.empty()) { + GAPI_Assert(params.mean.size() == params.stdev.size() && + params.mean.size() == params.input_names.size()); + for (auto idx : ade::util::iota(params.num_in)) { + const auto ort_idx = getIdxByName(in_tensor_info, params.input_names[idx]); + using M = TensorInfo::MeanStdev; + in_tensor_info[ort_idx].mstd = util::make_optional(M{ params.mean[idx] + , params.stdev[idx] }); + } + } + + // Update normalize flags for input tensors + if (!params.normalize.empty()) { + for (auto idx : ade::util::iota(params.num_in)) { + const auto ort_idx = getIdxByName(in_tensor_info, params.input_names[idx]); + in_tensor_info[ort_idx].normalize = params.normalize[idx]; + } + } + + // Pre-allocate vectors (not buffers) for runtime info + in_data.resize(params.num_in); + out_data.resize(params.num_out); +} + +std::vector 
ONNXCompiled::getTensorInfo(TensorPosition pos) { + GAPI_Assert(pos == INPUT || pos == OUTPUT); + + const auto num_nodes = pos == INPUT + ? this_session.GetInputCount() + : this_session.GetOutputCount(); + + std::vector tensor_info; + tensor_info.reserve(num_nodes); + + Ort::AllocatorWithDefaultOptions allocator; + for (auto i : ade::util::iota(num_nodes)) { + const auto info = pos == INPUT + ? this_session.GetInputTypeInfo(i) + : this_session.GetOutputTypeInfo(i); + tensor_info.emplace_back(info.GetTensorTypeAndShapeInfo()); + + char *name_p = pos == INPUT + ? this_session.GetInputName(i, allocator) + : this_session.GetOutputName(i, allocator); + tensor_info.back().name = name_p; + allocator.Free(name_p); + } + + return tensor_info; +} + +cv::GMatDesc ONNXCompiled::outMeta(int idx) const { + if (is_dynamic) { + GAPI_Assert(!params.out_metas.empty() + && "Metadata must be specified if NN has dynamic inputs!"); + return params.out_metas.at(idx); + } + const auto ort_idx = getIdxByName(out_tensor_info, params.output_names[idx]); + return cv::GMatDesc(toCV(out_tensor_info[ort_idx].type), + toCV(out_tensor_info[ort_idx].dims)); +} + +void ONNXCompiled::setInput(int i, const cv::Mat &m) { + const auto in_idx = i; + const auto in_name = params.input_names[in_idx]; + const auto ort_idx = getIdxByName(in_tensor_info, in_name); + preprocess(m, in_tensor_info[ort_idx], in_data[in_idx]); +} + +void ONNXCompiled::setOutput(int i, cv::Mat &m) { + // FIXME: No need in double-indexing? 
+ out_data[i] = m; +} + +cv::Mat ONNXCompiled::allocOutput(int i) const { + cv::Mat m; + m.create(toCV(out_tensor_info[i].dims), + toCV(out_tensor_info[i].type)); + return m; +} + +void ONNXCompiled::Run(const std::vector& ins, + const std::vector& outs) { + std::vector in_tensors, out_tensors; + + auto in_run_names = getCharNames(params.input_names); + + for (const auto it : ade::util::indexed(params.input_names)) { + auto i = ade::util::index(it); + auto in_name = ade::util::value(it); + const auto idx = getIdxByName(in_tensor_info, in_name); + in_tensors.emplace_back(createTensor(this_memory_info, + in_tensor_info[idx], + ins[i])); + } + + if (!is_dynamic) { + // Easy path - just run the session which is bound to G-API's + // internal data + for (auto i : ade::util::iota(params.output_names.size())) { + out_tensors.emplace_back(createTensor(this_memory_info, + out_tensor_info[i], + outs[i])); + } + auto out_run_names = getCharNames(params.output_names); + this_session.Run(Ort::RunOptions{nullptr}, + in_run_names.data(), + &in_tensors.front(), + params.input_names.size(), + out_run_names.data(), + &out_tensors.front(), + params.output_names.size()); + } else { + // Hard path - run session & user-defined post-processing + // NOTE: use another list of output names here + std::vector out_names; + for (auto &&ti : out_tensor_info) { + out_names.push_back(ti.name.c_str()); + } + + auto outputs = this_session.Run(Ort::RunOptions{nullptr}, + in_run_names.data(), + &in_tensors.front(), + params.input_names.size(), + out_names.data(), + out_names.size()); + std::unordered_map onnx_outputs; + std::unordered_map gapi_outputs; + + GAPI_Assert(outputs.size() == out_names.size()); + // Fill in ONNX tensors + for (auto &&iter : ade::util::zip(ade::util::toRange(out_tensor_info), + ade::util::toRange(outputs))) { + const auto &out_name = std::get<0>(iter).name; + auto &out_tensor = std::get<1>(iter); + onnx_outputs[out_name] = toCV(out_tensor); + } + + // Fill in G-API outputs + 
for (auto &&it: ade::util::indexed(params.output_names)) { + gapi_outputs[ade::util::value(it)] = outs[ade::util::index(it)]; + } + params.custom_post_proc(onnx_outputs, gapi_outputs); + } +} + +void ONNXCompiled::run() { + Run(in_data, out_data); +} + +struct Infer: public cv::detail::KernelTag { + using API = cv::GInferBase; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + cv::GMetaArgs result; + + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + GAPI_Assert(uu.oc->numInputs() == in_metas.size() + && "Known input layers count doesn't match input meta count"); + for (auto &&mm : in_metas) { + GAPI_Assert(util::holds_alternative(mm) + && "Non-GMat inputs are not supported"); + } + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + result.emplace_back(uu.oc->outMeta(idx)); + } + return result; + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + for (auto &&idx : ade::util::iota(uu.oc->numInputs())) { + uu.oc->setInput(idx, ctx.inMat(idx)); + } + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + uu.oc->setOutput(idx, ctx.outMatR(idx)); + } + uu.oc->run(); + } +}; + +struct InferROI: public cv::detail::KernelTag { + using API = cv::GInferROIBase; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + cv::GMetaArgs result; + + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + GAPI_Assert(1u == uu.oc->numInputs()); + GAPI_Assert(2u == in_metas.size()); + + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + 
result.emplace_back(uu.oc->outMeta(idx)); + } + return result; + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + // non-generic version for now, per the InferROI's definition + GAPI_Assert(uu.oc->numInputs() == 1u); + const auto& this_roi = ctx.inArg(0).rref(); + const auto this_mat = ctx.inMat(1); + + uu.oc->setInput(0, this_mat(this_roi)); + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + uu.oc->setOutput(idx, ctx.outMatR(idx)); + } + uu.oc->run(); + } +}; + +struct InferList: public cv::detail::KernelTag { + using API = cv::GInferListBase; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + // Note our input layers list order matches the API order and so + // meta order. + GAPI_Assert(uu.oc->numInputs() == (in_metas.size() - 1u) + && "Known input layers count doesn't match input meta count"); + + for (auto i : ade::util::iota(uu.oc->numInputs())) { + const auto & mm = in_metas[i + 1]; + + GAPI_Assert(util::holds_alternative(mm) + && "Non-GMat inputs are not supported"); + } + + // roi-list version is much easier at the moment. + // All our outputs are vectors which don't have + // metadata at the moment - so just create a vector of + // "empty" array metadatas of the required size. 
+ return cv::GMetaArgs(uu.oc->numOutputs(), + cv::GMetaArg{cv::empty_array_desc()}); + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + // non-generic version for now: + // - assumes input 0 is always ROI list + // - assumes all inputs/outputs are always Mats + GAPI_Assert(uu.oc->numInputs() == 1); // roi list is not counted in net's inputs + + const auto& in_roi_vec = ctx.inArg(0u).rref(); + const cv::Mat this_mat = ctx.inMat(1u); + + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + ctx.outVecR(i).clear(); + } + for (const auto &rc : in_roi_vec) { + uu.oc->setInput(0, this_mat(rc)); + std::vector out_mats(uu.oc->numOutputs()); + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + out_mats[i] = uu.oc->allocOutput(i); + uu.oc->setOutput(i, out_mats[i]); + } + uu.oc->run(); + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + std::vector &out_vec = ctx.outVecR(i); + out_vec.push_back(std::move(out_mats[i])); + } + } + } +}; + +struct InferList2: public cv::detail::KernelTag { + using API = cv::GInferList2Base; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + // Note our input layers list order matches the API order and so + // meta order. + GAPI_Assert(uu.oc->numInputs() == (in_metas.size() - 1u) + && "Known input layers count doesn't match input meta count"); + + // In contrast to InferList, the InferList2 has only one + // "full-frame" image argument, and all the rest are arrays of + // ether ROI or blobs. So here we set the 0th arg image format + // to all inputs which are ROI-based (skipping the + // "blob"-based ones) + // FIXME: this is filtering not done, actually! GArrayDesc has + // no hint for type! 
+ const auto &mm_0 = in_metas[0u]; + const auto &meta_0 = util::get(mm_0); + GAPI_Assert( !meta_0.isND() + && !meta_0.planar + && "Only images are supported as the 0th argument"); + for (auto i : ade::util::iota(uu.oc->numInputs())) { + const auto &mm = in_metas[i + 1]; + GAPI_Assert(util::holds_alternative(mm) + && "Non-array inputs are not supported"); + } + + // roi-list version is much easier at the moment. + // All our outputs are vectors which don't have + // metadata at the moment - so just create a vector of + // "empty" array metadatas of the required size. + return cv::GMetaArgs(uu.oc->numOutputs(), + cv::GMetaArg{cv::empty_array_desc()}); + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + GAPI_Assert(ctx.args.size() > 1u + && "This operation must have at least two arguments"); + + // Since we do a ROI list inference, always assume our input buffer is image + const cv::Mat mat_0 = ctx.inMat(0u); + // Take the next argument, which must be vector (of any kind). + // Use this only to obtain the ROI list size (sizes of all + // other vectors must be equal to this one) + const auto list_size = ctx.inArg(1u).size(); + + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + ctx.outVecR(i).clear(); + } + // For every ROI in the list {{{ + for (const auto &list_idx : ade::util::iota(list_size)) { + std::vector in_tensors, out_tensors; + std::vector in_mats(uu.oc->numInputs()); + // For every input of the net {{{ + for (auto in_idx : ade::util::iota(uu.oc->numInputs())) { + const auto &this_vec = ctx.inArg(in_idx+1u); + GAPI_Assert(this_vec.size() == list_size); + // Prepare input {{{ + // FIXME: Terrible run-time logic based on RTTI! + // FIXME: Will never work on non-RTTI systems! + // FIXME: Need to replace with a static type tags + // (like with serialization) instead! 
+ if (this_vec.holds()) { + // ROI case - create an ROI blob + const auto &vec = this_vec.rref(); + uu.oc->setInput(in_idx, mat_0(vec[list_idx])); + } else if (this_vec.holds()) { + // Mat case - create a regular blob + // FIXME: NOW Assume Mats are always BLOBS (not + // images) + const auto &vec = this_vec.rref(); + uu.oc->setInput(in_idx, vec[list_idx]); + } else { + GAPI_Assert(false && "Only Rect and Mat types are supported for infer list 2!"); + } + // }}} (Prepare input) + } // }}} (For every input of the net) + + std::vector out_mats(uu.oc->numOutputs()); + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + out_mats[i] = uu.oc->allocOutput(i); + uu.oc->setOutput(i, out_mats[i]); + } + uu.oc->run(); + + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + std::vector &out_vec = ctx.outVecR(i); + out_vec.push_back(std::move(out_mats[i])); + } + } // }}} (For every ROI in the list) + } +}; + +} // namespace onnx +} // namespace gimpl +} // namespace cv + +namespace { + class GONNXBackendImpl final: public cv::gapi::GBackend::Priv { + virtual void unpackKernel(ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GKernelImpl &ii) override { + using namespace cv::gimpl; + // FIXME: Introduce a DNNBackend interface which'd specify + // the framework for this??? 
+ GONNXModel gm(gr); + const auto &np = gm.metadata(nh).get(); + const auto &pp = cv::util::any_cast(np.opaque); + const auto &ki = cv::util::any_cast(ii.opaque); + gm.metadata(nh).set(ONNXUnit{pp}); + gm.metadata(nh).set(ONNXCallable{ki.run}); + gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc}); + } + + virtual EPtr compile(const ade::Graph &graph, + const cv::GCompileArgs &, + const std::vector &nodes) const override { + return EPtr{new cv::gimpl::onnx::GONNXExecutable(graph, nodes)}; + } + + virtual cv::gapi::GKernelPackage auxiliaryKernels() const override { + return cv::gapi::kernels< cv::gimpl::onnx::Infer + , cv::gimpl::onnx::InferROI + , cv::gimpl::onnx::InferList + , cv::gimpl::onnx::InferList2 + >(); + } + }; +} + +cv::gapi::GBackend cv::gapi::onnx::backend() { + static cv::gapi::GBackend this_backend(std::make_shared()); + return this_backend; +} +#else // HAVE_ONNX + +cv::gapi::GBackend cv::gapi::onnx::backend() { + // Still provide this symbol to avoid linking issues + util::throw_error(std::runtime_error("G-API has been compiled without ONNX support")); +} +#endif // HAVE_ONNX diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/onnx/gonnxbackend.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/onnx/gonnxbackend.hpp new file mode 100644 index 00000000000..a3cc8970309 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/onnx/gonnxbackend.hpp @@ -0,0 +1,56 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_GONNXBACKEND_HPP +#define OPENCV_GAPI_GONNXBACKEND_HPP + +#include "opencv2/gapi/infer/onnx.hpp" +#ifdef HAVE_ONNX + +#include +#include // type_list_index + +#include "backends/common/gbackend.hpp" + +namespace cv { +namespace gimpl { +namespace onnx { + +class GONNXExecutable final: public GIslandExecutable +{ + const ade::Graph &m_g; + GModel::ConstGraph m_gm; + + // The only executable stuff in this graph + // (assuming it is always single-op) + ade::NodeHandle this_nh; + + // List of all resources in graph (both internal and external) + std::vector m_dataNodes; + + // Actual data of all resources in graph (both internal and external) + Mag m_res; + + // Execution helpers + GArg packArg(const GArg &arg); + +public: + GONNXExecutable(const ade::Graph &graph, + const std::vector &nodes); + + virtual inline bool canReshape() const override { return false; } + virtual inline void reshape(ade::Graph&, const GCompileArgs&) override { + GAPI_Assert(false); // Not implemented yet + } + + virtual void run(std::vector &&input_objs, + std::vector &&output_objs) override; +}; + +}}} // namespace cv::gimpl::onnx + +#endif // HAVE_ONNX +#endif // OPENCV_GAPI_GONNXBACKEND_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/plaidml/gplaidmlbackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/plaidml/gplaidmlbackend.cpp index d2c78eb3d59..ebce62918ce 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/plaidml/gplaidmlbackend.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/plaidml/gplaidmlbackend.cpp @@ -198,6 +198,9 @@ void cv::gimpl::GPlaidMLExecutable::run(std::vector &&input_objs, exec_->run(); for (auto& it : output_objs) bindOutArg(it.first, it.second); + + // FIXME: + // PlaidML backend haven't been updated with RMat support } void cv::gimpl::GPlaidMLExecutable::bindInArg(const RcDesc &rc, const GRunArg &arg) diff 
--git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render.cpp new file mode 100644 index 00000000000..fcf84713ff3 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render.cpp @@ -0,0 +1,236 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2019 Intel Corporation + +#include "precomp.hpp" +#include "ft_render.hpp" + +#ifdef HAVE_FREETYPE + +#include "ft_render_priv.hpp" + +#include +#include + +cv::gapi::wip::draw::FTTextRender::Priv::Priv(const std::string& path) +{ + if (FT_Init_FreeType(&m_library) != 0) + { + cv::util::throw_error(std::runtime_error("Failed to initialize FT")); + } + + if (FT_New_Face(m_library, path.c_str(), 0, &m_face)) + { + FT_Done_FreeType(m_library); + cv::util::throw_error(std::runtime_error("Failed to create a font face")); + } +} + +cv::Size cv::gapi::wip::draw::FTTextRender::Priv::getTextSize(const std::wstring& text, int fh, int* baseline) +{ + // + // + // + // ^ diff between size and advance(2) + // | ______________ width width |<->| + // | | ** | |<------>| <------------|---> + // | | * * | |________| |____________|___|________ + // | left | * * | left |* * * * | | * * * * *| | ^ ^ + // |<---->| ** ** ** | <----->|* *| | * | | t | | + // | | * * | | |* *| | * | | o | h | + // | | * * | | |* * * * | | * (1) | p | e | baseline + // O------|*------------*|-----O----- |*-------|-|----O--*----O---|-----*-i-|------------> + // | |______________| | |* | |* | * | | ^ g | + // | | | | |* | |* | * | | b | h | + // | | width | | |* | |* | * | | o | t | + // | |<------------>| | |* | | * *|* | | t | | + // | | |________| |____|_______|___|_____|___* + // | advance | advance | |advance| (advance maybe less than width) + // 
<---------------------------><----------------|----><------> + // |left| (left may be negative) + // |<-->| + // + // + // O - The pen position for any time + // + // left (m_face->glyph->bitmap_left) - The horizontal distance from the current pen position to the glyph's left bbox edge. + // + // advance (m_face->glyph->advance.x >> 6) - The horizontal distance to increment (for left-to-right writing) + // or decrement (for right-to-left writing) the pen position after a + // glyph has been rendered when processing text + // + // width (bitmap->width) - The width of the glyph + // + // + // Algorithm to compute size of the text bounding box: + // + // 1) Go through all symbols and shift pen position and save glyph parameters (left, advance, width) + // If left + pen position < 0 set left to 0. For example this may happen + // if we print first letter 'J' or any other letter with negative 'left' + // We want to render the glyph at pen position + left, so we mustn't allow it to be negative + // + // 2) If width == 0 we must skip this symbol and not save its parameters. + // For example width == 0 for space sometimes + // + // 3) Also we compute max top and max bottom; it's required to compute the baseline + // + // 3) At the end we'll get the pen position for the symbol next to the last. + // See (1) on the picture. + // + // 4) As we can see the last pen position isn't the horizontal size yet. + // We need to check if the glyph goes beyond the last position of the pen + // To do this we can: + // a) Return to the previous position -advance + // b) Shift on left value +left + // c) Shift on width of the last glyph + // + // Compare the result position with the pen position and choose the max + // + // We can compute diff and check if diff > 0 pen.x += diff. + // See (2) on the picture. + // + // 5) Return size. Complete!!!
+ // + // See also about freetype glyph metrics: + // https://www.freetype.org/freetype2/docs/glyphs/glyphs-3.html + + GAPI_Assert(!FT_Set_Pixel_Sizes(m_face, fh, fh) && + "Failed to set pixel size"); + + cv::Point pen(0, 0); + + int max_bot = 0; + int max_top = 0; + int last_advance = 0; + int last_width = 0; + int last_left = 0; + + for (const auto& wc : text) + { + GAPI_Assert(!FT_Load_Char(m_face, wc, FT_LOAD_RENDER) && + "Failed to load char"); + + FT_Bitmap *bitmap = &(m_face->glyph->bitmap); + + int left = m_face->glyph->bitmap_left; + int advance = (m_face->glyph->advance.x >> 6); + int width = bitmap->width; + + // NB: Read (1) paragraph of algorithm description + if (pen.x + left < 0) + { + left = 0; + } + + int bot = (m_face->glyph->metrics.height - m_face->glyph->metrics.horiBearingY) >> 6; + max_bot = std::max(max_bot, bot); + max_top = std::max(max_top, m_face->glyph->bitmap_top); + + // NB: Read (2) paragraph of algorithm description + if (width != 0) + { + last_width = width; + last_advance = advance; + last_left = left; + } + + pen.x += advance; + } + + // NB: Read (4) paragraph of algorithm description + int diff = (last_width + last_left) - last_advance; + pen.x += (diff > 0) ? 
diff : 0; + + if (baseline) + { + *baseline = max_bot; + } + + return {pen.x, max_bot + max_top}; +} + +void cv::gapi::wip::draw::FTTextRender::Priv::putText(cv::Mat& mat, + const std::wstring& text, + const cv::Point& org, + int fh) +{ + GAPI_Assert(!FT_Set_Pixel_Sizes(m_face, fh, fh) && + "Failed to set pixel size"); + + cv::Point pen = org; + for (const auto& wc : text) + { + GAPI_Assert(!FT_Load_Char(m_face, wc, FT_LOAD_RENDER) && + "Failed to load char"); + FT_Bitmap *bitmap = &(m_face->glyph->bitmap); + + // FIXME: Skip glyph, if size is 0 + if (bitmap->rows == 0 || bitmap->width == 0) { + continue; + } + + cv::Mat glyph(bitmap->rows, bitmap->width, CV_8UC1, bitmap->buffer, bitmap->pitch); + + int left = m_face->glyph->bitmap_left; + int top = m_face->glyph->bitmap_top; + int advance = (m_face->glyph->advance.x >> 6); + + if (pen.x + left < 0) + { + left = 0; + } + + cv::Rect rect(pen.x + left, org.y - top, glyph.cols, glyph.rows); + + auto roi = mat(rect); + roi += glyph; + pen.x += advance; + } +} + +cv::gapi::wip::draw::FTTextRender::Priv::~Priv() +{ + FT_Done_Face(m_face); + FT_Done_FreeType(m_library); +} + +cv::gapi::wip::draw::FTTextRender::FTTextRender(const std::string& path) + : m_priv(new Priv(path)) +{ +} + +cv::Size cv::gapi::wip::draw::FTTextRender::getTextSize(const std::wstring& text, + int fh, + int* baseline) +{ + return m_priv->getTextSize(text, fh, baseline); +} + +void cv::gapi::wip::draw::FTTextRender::putText(cv::Mat& mat, + const std::wstring& text, + const cv::Point& org, + int fh) +{ + m_priv->putText(mat, text, org, fh); +} + +#else + +cv::Size cv::gapi::wip::draw::FTTextRender::getTextSize(const std::wstring&, int, int*) +{ + cv::util::throw_error(std::runtime_error("Freetype not found")); +} + +void cv::gapi::wip::draw::FTTextRender::putText(cv::Mat&, const std::wstring&, const cv::Point&, int) +{ + cv::util::throw_error(std::runtime_error("Freetype not found")); +} + +cv::gapi::wip::draw::FTTextRender::FTTextRender(const 
std::string&) +{ + cv::util::throw_error(std::runtime_error("Freetype not found")); +} + +#endif // HAVE_FREETYPE diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render.hpp new file mode 100644 index 00000000000..068c0d4d3f1 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render.hpp @@ -0,0 +1,44 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2019 Intel Corporation + +#ifndef OPENCV_FREETYPE_TEXT_RENDER_HPP +#define OPENCV_FREETYPE_TEXT_RENDER_HPP + +#include +#include + +#include + +#include + +namespace cv +{ +namespace gapi +{ +namespace wip +{ +namespace draw +{ + +class GAPI_EXPORTS FTTextRender +{ +public: + class Priv; + explicit FTTextRender(const std::string& path); + + cv::Size getTextSize(const std::wstring& text, int fh, int* baseline); + void putText(cv::Mat& mat, const std::wstring& text, const cv::Point& org, int fh); + +private: + std::shared_ptr m_priv; +}; + +} // namespace draw +} // namespace wip +} // namespace gapi +} // namespace cv + +#endif // OPENCV_FREETYPE_TEXT_RENDER_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render_priv.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render_priv.hpp new file mode 100644 index 00000000000..903f439b96b --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/ft_render_priv.hpp @@ -0,0 +1,48 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2019 Intel Corporation + + +#ifdef HAVE_FREETYPE + +#ifndef OPENCV_FT_RENDER_PRIV_HPP +#define OPENCV_FT_RENDER_PRIV_HPP + +#include "ft_render.hpp" + +#include +#include FT_FREETYPE_H + +namespace cv +{ +namespace gapi +{ +namespace wip +{ +namespace draw +{ + +class FTTextRender::Priv +{ +public: + explicit Priv(const std::string& path); + + cv::Size getTextSize(const std::wstring& text, int fh, int* baseline); + void putText(cv::Mat& mat, const std::wstring& text, const cv::Point& org, int fh); + + ~Priv(); + +private: + FT_Library m_library; + FT_Face m_face; +}; + +} // namespace draw +} // namespace wip +} // namespace gapi +} // namespace cv + +#endif // OPENCV_FT_RENDER_PRIV_HPP +#endif // HAVE_FREETYPE diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/grenderocv.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/grenderocv.cpp index cb4fd1be3a8..71be889d79b 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/grenderocv.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/render/grenderocv.cpp @@ -1,16 +1,21 @@ #include #include "api/render_ocv.hpp" -#include "backends/render/grenderocv.hpp" #include +#include -GAPI_RENDER_OCV_KERNEL(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR) +struct RenderOCVState +{ + std::shared_ptr ftpr; +}; + +GAPI_OCV_KERNEL_ST(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR, RenderOCVState) { static void run(const cv::Mat& in, const cv::gapi::wip::draw::Prims& prims, - cv::gapi::wip::draw::FTTextRender* ftpr, - cv::Mat& out) + cv::Mat& out, + RenderOCVState& state) { // NB: If in and out cv::Mats are the same object // we can avoid copy and render on out cv::Mat @@ -19,18 +24,33 @@ GAPI_RENDER_OCV_KERNEL(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR) in.copyTo(out); } - cv::gapi::wip::draw::drawPrimitivesOCVBGR(out, prims, ftpr); + cv::gapi::wip::draw::drawPrimitivesOCVBGR(out, prims, state.ftpr); + } + 
+ static void setup(const cv::GMatDesc& /* in */, + const cv::GArrayDesc& /* prims */, + std::shared_ptr& state, + const cv::GCompileArgs& args) + { + using namespace cv::gapi::wip::draw; + auto opt_freetype_font = cv::gapi::getCompileArg(args); + state = std::make_shared(); + + if (opt_freetype_font.has_value()) + { + state->ftpr = std::make_shared(opt_freetype_font->path); + } } }; -GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12) +GAPI_OCV_KERNEL_ST(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12, RenderOCVState) { static void run(const cv::Mat& in_y, const cv::Mat& in_uv, const cv::gapi::wip::draw::Prims& prims, - cv::gapi::wip::draw::FTTextRender* ftpr, cv::Mat& out_y, - cv::Mat& out_uv) + cv::Mat& out_uv, + RenderOCVState& state) { // NB: If in and out cv::Mats are the same object // we can avoid copy and render on out cv::Mat @@ -67,7 +87,7 @@ GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12) cv::resize(in_uv, upsample_uv, in_uv.size() * 2, cv::INTER_LINEAR); cv::merge(std::vector{in_y, upsample_uv}, yuv); - cv::gapi::wip::draw::drawPrimitivesOCVYUV(yuv, prims, ftpr); + cv::gapi::wip::draw::drawPrimitivesOCVYUV(yuv, prims, state.ftpr); // YUV -> NV12 cv::Mat out_u, out_v, uv_plane; @@ -76,6 +96,22 @@ GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12) cv::merge(std::vector{chs[1], chs[2]}, uv_plane); cv::resize(uv_plane, out_uv, uv_plane.size() / 2, cv::INTER_LINEAR); } + + static void setup(const cv::GMatDesc& /* in_y */, + const cv::GMatDesc& /* in_uv */, + const cv::GArrayDesc& /* prims */, + std::shared_ptr& state, + const cv::GCompileArgs& args) + { + using namespace cv::gapi::wip::draw; + auto has_freetype_font = cv::gapi::getCompileArg(args); + state = std::make_shared(); + + if (has_freetype_font) + { + state->ftpr = std::make_shared(has_freetype_font->path); + } + } }; cv::gapi::GKernelPackage cv::gapi::render::ocv::kernels() diff --git 
a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingbackend.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingbackend.cpp new file mode 100644 index 00000000000..d5f042de0b1 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingbackend.cpp @@ -0,0 +1,203 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include +#include // throw_error +#include // kernels + +#include "api/gbackend_priv.hpp" +#include "backends/common/gbackend.hpp" + +#include "gstreamingbackend.hpp" +#include "gstreamingkernel.hpp" + +namespace { + +struct StreamingCreateFunction +{ + static const char *name() { return "StreamingCreateFunction"; } + cv::gapi::streaming::CreateActorFunction createActorFunction; +}; + +using StreamingGraph = ade::TypedGraph + < cv::gimpl::Op + , StreamingCreateFunction + >; + +using ConstStreamingGraph = ade::ConstTypedGraph + < cv::gimpl::Op + , StreamingCreateFunction + >; + + +class GStreamingIntrinExecutable final: public cv::gimpl::GIslandExecutable +{ + virtual void run(std::vector &&, + std::vector &&) override { + GAPI_Assert(false && "Not implemented"); + } + + virtual void run(GIslandExecutable::IInput &in, + GIslandExecutable::IOutput &out) override; + + virtual bool allocatesOutputs() const override { return true; } + // Return an empty RMat since we will reuse the input. + // There is no need to allocate and copy 4k image here. 
+ virtual cv::RMat allocate(const cv::GMatDesc&) const override { return {}; } + + virtual bool canReshape() const override { return true; } + virtual void reshape(ade::Graph&, const cv::GCompileArgs&) override { + // Do nothing here + } + +public: + GStreamingIntrinExecutable(const ade::Graph &, + const std::vector &); + + const ade::Graph& m_g; + cv::gimpl::GModel::ConstGraph m_gm; + cv::gapi::streaming::IActor::Ptr m_actor; +}; + +void GStreamingIntrinExecutable::run(GIslandExecutable::IInput &in, + GIslandExecutable::IOutput &out) +{ + m_actor->run(in, out); +} + +class GStreamingBackendImpl final: public cv::gapi::GBackend::Priv +{ + virtual void unpackKernel(ade::Graph &graph, + const ade::NodeHandle &op_node, + const cv::GKernelImpl &impl) override + { + StreamingGraph gm(graph); + const auto &kimpl = cv::util::any_cast(impl.opaque); + gm.metadata(op_node).set(StreamingCreateFunction{kimpl.createActorFunction}); + } + + virtual EPtr compile(const ade::Graph &graph, + const cv::GCompileArgs &, + const std::vector &nodes) const override + { + return EPtr{new GStreamingIntrinExecutable(graph, nodes)}; + } + + virtual bool controlsMerge() const override + { + return true; + } + + virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &, + const ade::NodeHandle &, + const ade::NodeHandle &, + const ade::NodeHandle &) const override + { + return false; + } +}; + +GStreamingIntrinExecutable::GStreamingIntrinExecutable(const ade::Graph& g, + const std::vector& nodes) + : m_g(g), m_gm(m_g) +{ + using namespace cv::gimpl; + const auto is_op = [this](const ade::NodeHandle &nh) + { + return m_gm.metadata(nh).get().t == NodeType::OP; + }; + + auto it = std::find_if(nodes.begin(), nodes.end(), is_op); + GAPI_Assert(it != nodes.end() && "No operators found for this island?!"); + + ConstStreamingGraph cag(m_g); + m_actor = cag.metadata(*it).get().createActorFunction(); + + // Ensure this the only op in the graph + if (std::any_of(it+1, nodes.end(), is_op)) + { + 
cv::util::throw_error + (std::logic_error + ("Internal error: Streaming subgraph has multiple operations")); + } +} + +} // anonymous namespace + +cv::gapi::GBackend cv::gapi::streaming::backend() +{ + static cv::gapi::GBackend this_backend(std::make_shared()); + return this_backend; +} + +cv::gapi::GKernelPackage cv::gapi::streaming::kernels() +{ + return cv::gapi::kernels(); +} + +void cv::gimpl::Copy::Actor::run(cv::gimpl::GIslandExecutable::IInput &in, + cv::gimpl::GIslandExecutable::IOutput &out) +{ + while (true) + { + const auto in_msg = in.get(); + if (cv::util::holds_alternative(in_msg)) + { + out.post(cv::gimpl::EndOfStream{}); + return; + } + + const cv::GRunArgs &in_args = cv::util::get(in_msg); + GAPI_Assert(in_args.size() == 1u); + + cv::GRunArgP out_arg = out.get(0); + *cv::util::get(out_arg) = cv::util::get(in_args[0]); + out.post(std::move(out_arg)); + } +} + +void cv::gimpl::BGR::Actor::run(cv::gimpl::GIslandExecutable::IInput &in, + cv::gimpl::GIslandExecutable::IOutput &out) +{ + while (true) + { + const auto in_msg = in.get(); + if (cv::util::holds_alternative(in_msg)) + { + out.post(cv::gimpl::EndOfStream{}); + return; + } + + const cv::GRunArgs &in_args = cv::util::get(in_msg); + GAPI_Assert(in_args.size() == 1u); + + cv::GRunArgP out_arg = out.get(0); + auto frame = cv::util::get(in_args[0]); + const auto& desc = frame.desc(); + + auto& rmat = *cv::util::get(out_arg); + switch (desc.fmt) + { + case cv::MediaFormat::BGR: + rmat = cv::make_rmat(frame); + break; + case cv::MediaFormat::NV12: + { + cv::Mat bgr; + auto view = frame.access(cv::MediaFrame::Access::R); + cv::Mat y_plane (desc.size, CV_8UC1, view.ptr[0]); + cv::Mat uv_plane(desc.size / 2, CV_8UC2, view.ptr[1]); + cv::cvtColorTwoPlane(y_plane, uv_plane, bgr, cv::COLOR_YUV2BGR_NV12); + rmat = cv::make_rmat(bgr); + break; + } + default: + cv::util::throw_error( + std::logic_error("Unsupported MediaFormat for cv::gapi::streaming::BGR")); + } + out.post(std::move(out_arg)); + } +} diff 
--git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingbackend.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingbackend.hpp new file mode 100644 index 00000000000..bb2100c1592 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingbackend.hpp @@ -0,0 +1,89 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_GSTREAMINGBACKEND_HPP +#define OPENCV_GAPI_GSTREAMINGBACKEND_HPP + +#include +#include +#include "gstreamingkernel.hpp" + +namespace cv { +namespace gimpl { + +struct RMatMediaBGRAdapter final: public cv::RMat::Adapter +{ + RMatMediaBGRAdapter(cv::MediaFrame frame) : m_frame(frame) { }; + + virtual cv::RMat::View access(cv::RMat::Access a) override + { + auto view = m_frame.access(a == cv::RMat::Access::W ? 
cv::MediaFrame::Access::W + : cv::MediaFrame::Access::R); + auto ptr = reinterpret_cast(view.ptr[0]); + auto stride = view.stride[0]; + + std::shared_ptr view_ptr = + std::make_shared(std::move(view)); + auto callback = [view_ptr]() mutable { view_ptr.reset(); }; + + return cv::RMat::View(desc(), ptr, stride, callback); + } + + virtual cv::GMatDesc desc() const override + { + const auto& desc = m_frame.desc(); + GAPI_Assert(desc.fmt == cv::MediaFormat::BGR); + return cv::GMatDesc{CV_8U, 3, desc.size}; + } + + cv::MediaFrame m_frame; +}; + +struct Copy: public cv::detail::KernelTag +{ + using API = cv::gapi::streaming::GCopy; + + static gapi::GBackend backend() { return cv::gapi::streaming::backend(); } + + class Actor final: public cv::gapi::streaming::IActor + { + public: + explicit Actor() {} + virtual void run(cv::gimpl::GIslandExecutable::IInput &in, + cv::gimpl::GIslandExecutable::IOutput &out) override; + }; + + static cv::gapi::streaming::IActor::Ptr create() + { + return cv::gapi::streaming::IActor::Ptr(new Actor()); + } + + static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; }; +}; + +struct BGR: public cv::detail::KernelTag +{ + using API = cv::gapi::streaming::GBGR; + static gapi::GBackend backend() { return cv::gapi::streaming::backend(); } + + class Actor final: public cv::gapi::streaming::IActor { + public: + explicit Actor() {} + virtual void run(cv::gimpl::GIslandExecutable::IInput &in, + cv::gimpl::GIslandExecutable::IOutput&out) override; + }; + + static cv::gapi::streaming::IActor::Ptr create() + { + return cv::gapi::streaming::IActor::Ptr(new Actor()); + } + static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; }; +}; + +} // namespace gimpl +} // namespace cv + +#endif // OPENCV_GAPI_GSTREAMINGBACKEND_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingkernel.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingkernel.hpp new 
file mode 100644 index 00000000000..4732262aa07 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/backends/streaming/gstreamingkernel.hpp @@ -0,0 +1,37 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + + +#ifndef OPENCV_GAPI_GSTREAMINGKERNEL_HPP +#define OPENCV_GAPI_GSTREAMINGKERNEL_HPP + +#include "compiler/gislandmodel.hpp" + +namespace cv { +namespace gapi { +namespace streaming { + +class IActor { +public: + using Ptr = std::shared_ptr; + + virtual void run(cv::gimpl::GIslandExecutable::IInput &in, + cv::gimpl::GIslandExecutable::IOutput &out) = 0; + + virtual ~IActor() = default; +}; + +using CreateActorFunction = std::function; +struct GStreamingKernel +{ + CreateActorFunction createActorFunction; +}; + +} // namespace streaming +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_GSTREAMINGKERNEL_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled.cpp index 00340da095f..263878ce0d4 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled.cpp @@ -57,6 +57,11 @@ void cv::GCompiled::Priv::checkArgs(const cv::gimpl::GRuntimeArgs &args) const // FIXME: Add details on what is actually wrong } validate_input_args(args.inObjs); + // FIXME: Actually, the passed parameter vector is never checked + // against its shapes - so if you compile with GScalarDesc passed + // for GMat argument, you will get your compilation right (!!) + // Probably it was there but somehow that olds checks (if they + // exist) are bypassed now. 
} bool cv::GCompiled::Priv::canReshape() const @@ -97,6 +102,7 @@ cv::GCompiled::operator bool() const void cv::GCompiled::operator() (GRunArgs &&ins, GRunArgsP &&outs) { + // FIXME: Check that matches the protocol!!! // FIXME: Check that matches the protocol m_priv->run(cv::gimpl::GRuntimeArgs{std::move(ins),std::move(outs)}); } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled_priv.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled_priv.hpp index f21bfc80bc2..b08b1f9c59f 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled_priv.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiled_priv.hpp @@ -38,6 +38,10 @@ class GAPI_EXPORTS GCompiled::Priv GMetaArgs m_outMetas; // inferred by compiler std::unique_ptr m_exec; + // NB: Used by python wrapper to clarify input/output types + GTypesInfo m_out_info; + GTypesInfo m_in_info; + void checkArgs(const cv::gimpl::GRuntimeArgs &args) const; public: @@ -55,6 +59,12 @@ public: const GMetaArgs& outMetas() const; const cv::gimpl::GModel::Graph& model() const; + + void setOutInfo(const GTypesInfo& info) { m_out_info = std::move(info); } + const GTypesInfo& outInfo() const { return m_out_info; } + + void setInInfo(const GTypesInfo& info) { m_in_info = std::move(info); } + const GTypesInfo& inInfo() const { return m_in_info; } }; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiler.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiler.cpp index 6f137a30242..1f1cbf9dbfd 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiler.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gcompiler.cpp @@ -35,6 +35,7 @@ #include "executor/gexecutor.hpp" #include "executor/gstreamingexecutor.hpp" #include "backends/common/gbackend.hpp" +#include "backends/common/gmetabackend.hpp" // #if !defined(GAPI_STANDALONE) @@ -42,6 +43,7 @@ 
#include // ...Imgproc #include // ...and Video kernel implementations #include // render::ocv::backend() +#include // streaming::kernels() #endif // !defined(GAPI_STANDALONE) // @@ -58,7 +60,8 @@ namespace for (const auto &b : pkg.backends()) { aux_pkg = combine(aux_pkg, b.priv().auxiliaryKernels()); } - return combine(pkg, aux_pkg); + // Always include built-in meta<> implementation + return combine(pkg, aux_pkg, cv::gimpl::meta::kernels()); }; auto has_use_only = cv::gapi::getCompileArg(args); @@ -70,7 +73,8 @@ namespace combine(cv::gapi::core::cpu::kernels(), cv::gapi::imgproc::cpu::kernels(), cv::gapi::video::cpu::kernels(), - cv::gapi::render::ocv::kernels()); + cv::gapi::render::ocv::kernels(), + cv::gapi::streaming::kernels()); #else cv::gapi::GKernelPackage(); #endif // !defined(GAPI_STANDALONE) @@ -91,7 +95,7 @@ namespace auto dump_info = cv::gapi::getCompileArg(args); if (!dump_info.has_value()) { - const char* path = getenv("GRAPH_DUMP_PATH"); + const char* path = std::getenv("GRAPH_DUMP_PATH"); return path ? 
cv::util::make_optional(std::string(path)) : cv::util::optional(); @@ -238,6 +242,11 @@ cv::gimpl::GCompiler::GCompiler(const cv::GComputation &c, // (no compound backend present here) m_e.addPass("kernels", "check_islands_content", passes::checkIslandsContent); + // Special stage for intrinsics handling + m_e.addPassStage("intrin"); + m_e.addPass("intrin", "desync", passes::intrinDesync); + m_e.addPass("intrin", "finalizeIntrin", passes::intrinFinalize); + //Input metas may be empty when a graph is compiled for streaming m_e.addPassStage("meta"); if (!m_metas.empty()) @@ -311,9 +320,11 @@ void cv::gimpl::GCompiler::validateInputMeta() // FIXME: Auto-generate methods like this from traits: case GProtoArg::index_of(): case GProtoArg::index_of(): - case GProtoArg::index_of(): return util::holds_alternative(meta); + case GProtoArg::index_of(): + return util::holds_alternative(meta); + case GProtoArg::index_of(): return util::holds_alternative(meta); @@ -382,6 +393,9 @@ cv::gimpl::GCompiler::GPtr cv::gimpl::GCompiler::generateGraph() { GModel::Graph(*g).metadata().set(OriginalInputMeta{m_metas}); } + // FIXME: remove m_args, remove GCompileArgs from backends' method signatures, + // rework backends to access GCompileArgs from graph metadata + GModel::Graph(*g).metadata().set(CompileArgs{m_args}); return g; } @@ -405,6 +419,19 @@ void cv::gimpl::GCompiler::compileIslands(ade::Graph &g, const cv::GCompileArgs GIslandModel::compileIslands(gim, g, args); } +static cv::GTypesInfo collectInfo(const cv::gimpl::GModel::ConstGraph& g, + const std::vector& nhs) { + cv::GTypesInfo info; + info.reserve(nhs.size()); + + ade::util::transform(nhs, std::back_inserter(info), [&g](const ade::NodeHandle& nh) { + const auto& data = g.metadata(nh).get(); + return cv::GTypeInfo{data.shape, data.kind}; + }); + + return info; +} + cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg) { // This is the final compilation step. 
Here: @@ -423,6 +450,8 @@ cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg) // an execution plan for it (backend-specific execution) // ...before call to produceCompiled(); + GModel::ConstGraph cgr(*pg); + const auto &outMetas = GModel::ConstGraph(*pg).metadata() .get().outMeta; std::unique_ptr pE(new GExecutor(std::move(pg))); @@ -431,6 +460,14 @@ cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg) GCompiled compiled; compiled.priv().setup(m_metas, outMetas, std::move(pE)); + + // NB: Need to store input/output GTypeInfo to allocate output arrays for python bindings + auto out_meta = collectInfo(cgr, cgr.metadata().get().out_nhs); + auto in_meta = collectInfo(cgr, cgr.metadata().get().in_nhs); + + compiled.priv().setOutInfo(std::move(out_meta)); + compiled.priv().setInInfo(std::move(in_meta)); + return compiled; } @@ -446,6 +483,16 @@ cv::GStreamingCompiled cv::gimpl::GCompiler::produceStreamingCompiled(GPtr &&pg) outMetas = GModel::ConstGraph(*pg).metadata().get().outMeta; } + + GModel::ConstGraph cgr(*pg); + + // NB: Need to store input/output GTypeInfo to allocate output arrays for python bindings + auto out_meta = collectInfo(cgr, cgr.metadata().get().out_nhs); + auto in_meta = collectInfo(cgr, cgr.metadata().get().in_nhs); + + compiled.priv().setOutInfo(std::move(out_meta)); + compiled.priv().setInInfo(std::move(in_meta)); + std::unique_ptr pE(new GStreamingExecutor(std::move(pg), m_args)); if (!m_metas.empty() && !outMetas.empty()) @@ -526,7 +573,7 @@ cv::gimpl::GCompiler::GPtr cv::gimpl::GCompiler::makeGraph(const cv::GComputatio gm.metadata().set(p); } else if (cv::util::holds_alternative(priv.m_shape)) { auto c_dump = cv::util::get(priv.m_shape); - cv::gimpl::s11n::reconstruct(c_dump, g); + cv::gapi::s11n::reconstruct(c_dump, g); } return pG; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.cpp index 
a135123f822..4d0feaea710 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.cpp @@ -18,6 +18,7 @@ #include "compiler/gmodel.hpp" #include "compiler/gislandmodel.hpp" #include "compiler/gmodel.hpp" +#include "backends/common/gbackend.hpp" // RMatAdapter #include "logger.hpp" // GAPI_LOG @@ -174,13 +175,26 @@ void GIslandModel::generateInitial(GIslandModel::Graph &g, { auto src_data_nh = in_edge->srcNode(); auto isl_slot_nh = data_to_slot.at(src_data_nh); - g.link(isl_slot_nh, nh); // no other data stored yet + auto isl_new_eh = g.link(isl_slot_nh, nh); // no other data stored yet + // Propagate some special metadata from the GModel to GIslandModel + // TODO: Make it a single place (a function) for both inputs/outputs? + // (since it is duplicated in the below code block) + if (src_g.metadata(in_edge).contains()) + { + const auto idx = src_g.metadata(in_edge).get().index; + g.metadata(isl_new_eh).set(DesyncIslEdge{idx}); + } } for (auto out_edge : src_op_nh->outEdges()) { auto dst_data_nh = out_edge->dstNode(); auto isl_slot_nh = data_to_slot.at(dst_data_nh); - g.link(nh, isl_slot_nh); + auto isl_new_eh = g.link(nh, isl_slot_nh); + if (src_g.metadata(out_edge).contains()) + { + const auto idx = src_g.metadata(out_edge).get().index; + g.metadata(isl_new_eh).set(DesyncIslEdge{idx}); + } } } // for(all_operations) } @@ -253,6 +267,9 @@ void GIslandModel::syncIslandTags(Graph &g, ade::Graph &orig_g) void GIslandModel::compileIslands(Graph &g, const ade::Graph &orig_g, const GCompileArgs &args) { GModel::ConstGraph gm(orig_g); + if (gm.metadata().contains()) { + util::throw_error(std::logic_error("FATAL: The graph has unresolved intrinsics")); + } auto original_sorted = gm.metadata().get(); for (auto nh : g.nodes()) @@ -340,26 +357,21 @@ void GIslandExecutable::run(GIslandExecutable::IInput &in, GIslandExecutable::IO for (auto &&it: 
ade::util::zip(ade::util::toRange(in_desc), ade::util::toRange(in_vector))) { - // FIXME: Not every Island expects a cv::Mat instead of own::Mat on input - // This kludge should go as a result of de-ownification const cv::GRunArg& in_data_orig = std::get<1>(it); cv::GRunArg in_data; -#if !defined(GAPI_STANDALONE) switch (in_data_orig.index()) { case cv::GRunArg::index_of(): - in_data = cv::GRunArg{cv::util::get(in_data_orig)}; - break; - case cv::GRunArg::index_of(): - in_data = cv::GRunArg{(cv::util::get(in_data_orig))}; + // FIXME: This whole construct is ugly, from + // its writing to a need in this in general + in_data = cv::GRunArg{ cv::make_rmat(cv::util::get(in_data_orig)) + , in_data_orig.meta + }; break; default: in_data = in_data_orig; break; } -#else - in_data = in_data_orig; -#endif // GAPI_STANDALONE in_objs.emplace_back(std::get<0>(it), std::move(in_data)); } for (auto &&it: ade::util::indexed(ade::util::toRange(out_desc))) @@ -368,9 +380,27 @@ void GIslandExecutable::run(GIslandExecutable::IInput &in, GIslandExecutable::IO out.get(ade::util::checked_cast(ade::util::index(it)))); } run(std::move(in_objs), std::move(out_objs)); + + // Propagate in-graph meta down to the graph + // Note: this is not a complete implementation! Mainly this is a stub + // and the proper implementation should come later. + // + // Propagating the meta information here has its pros and cons. 
+ // Pros: it works here uniformly for both regular and streaming cases, + // also for the majority of old-fashioned (synchronous) backends + // Cons: backends implementing the asynchronous run(IInput,IOutput) + // won't get it out of the box + cv::GRunArg::Meta stub_meta; + for (auto &&in_arg : in_vector) + { + stub_meta.insert(in_arg.meta.begin(), in_arg.meta.end()); + } + // Report output objects as "ready" to the executor, also post + // calculated in-graph meta for the objects for (auto &&it: out_objs) { - out.post(std::move(it.second)); // report output objects as "ready" to the executor + out.meta(it.second, stub_meta); + out.post(std::move(it.second)); } } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.hpp index 390bdb55f9b..e8eb73692bc 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gislandmodel.hpp @@ -22,7 +22,6 @@ namespace cv { namespace gimpl { - // FIXME: GAPI_EXPORTS only because of tests! class GAPI_EXPORTS GIsland { @@ -122,6 +121,8 @@ public: virtual bool canReshape() const = 0; virtual void reshape(ade::Graph& g, const GCompileArgs& args) = 0; + virtual bool allocatesOutputs() const { return false; } + virtual cv::RMat allocate(const cv::GMatDesc&) const { GAPI_Assert(false && "should never be called"); } // This method is called when the GStreamingCompiled gets a new // input source to process. Normally this method is called once @@ -141,6 +142,14 @@ public: // at that stage. virtual void handleNewStream() {}; // do nothing here by default + // This method is called for every IslandExecutable when + // the stream-based execution is stopped. + // All processing is guaranteed to be stopped by this moment, + // with no pending or running 'run()' processes ran in background. 
+ // FIXME: This method is tightly bound to the GStreamingExecutor + // now. + virtual void handleStopStream() {} // do nothing here by default + virtual ~GIslandExecutable() = default; }; @@ -163,6 +172,10 @@ struct GIslandExecutable::IOutput: public GIslandExecutable::IODesc { virtual GRunArgP get(int idx) = 0; // Allocate (wrap) a new data object for output idx virtual void post(GRunArgP&&) = 0; // Release the object back to the framework (mark available) virtual void post(EndOfStream&&) = 0; // Post end-of-stream marker back to the framework + + // Assign accumulated metadata to the given output object. + // This method can only be called after get() and before post(). + virtual void meta(const GRunArgP&, const GRunArg::Meta &) = 0; }; // GIslandEmitter - a backend-specific thing which feeds data into @@ -221,8 +234,19 @@ struct IslandsCompiled static const char *name() { return "IslandsCompiled"; } }; +// This flag marks an edge in an GIslandModel as "desynchronized" +// i.e. it starts a new desynchronized subgraph +struct DesyncIslEdge +{ + static const char *name() { return "DesynchronizedIslandEdge"; } + + // Projection from GModel/DesyncEdge.index + int index; +}; + namespace GIslandModel { + using Graph = ade::TypedGraph < NodeKind , FusedIsland @@ -231,6 +255,7 @@ namespace GIslandModel , Emitter , Sink , IslandsCompiled + , DesyncIslEdge , ade::passes::TopologicalSortData >; @@ -243,6 +268,7 @@ namespace GIslandModel , Emitter , Sink , IslandsCompiled + , DesyncIslEdge , ade::passes::TopologicalSortData >; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.cpp index 39dc1da33bb..ea4eb880a43 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.cpp @@ -23,12 +23,16 @@ namespace cv { namespace gimpl { -ade::NodeHandle GModel::mkOpNode(GModel::Graph &g, 
const GKernel &k, const std::vector &args, const std::string &island) +ade::NodeHandle GModel::mkOpNode(GModel::Graph &g, + const GKernel &k, + const std::vector &args, + const cv::util::any ¶ms, + const std::string &island) { ade::NodeHandle op_h = g.createNode(); g.metadata(op_h).set(NodeType{NodeType::OP}); //These extra empty {} are to please GCC (-Wmissing-field-initializers) - g.metadata(op_h).set(Op{k, args, {}, {}}); + g.metadata(op_h).set(Op{k, args, {}, {}, params}); if (!island.empty()) g.metadata(op_h).set(Island{island}); return op_h; @@ -73,7 +77,7 @@ ade::NodeHandle GModel::mkDataNode(GModel::Graph &g, const GShape shape) return data_h; } -void GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t in_port) +ade::EdgeHandle GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t in_port) { // Check if input is already connected for (const auto& in_e : opH->inEdges()) @@ -92,9 +96,11 @@ void GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::si // Replace an API object with a REF (G* -> GOBJREF) op.args[in_port] = cv::GArg(RcDesc{gm.rc, gm.shape, {}}); + + return eh; } -void GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t out_port) +ade::EdgeHandle GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t out_port) { // FIXME: check validity using kernel prototype @@ -117,6 +123,8 @@ void GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::s const auto min_out_size = std::max(op.outs.size(), storage_with_port); op.outs.resize(min_out_size, RcDesc{-1,GShape::GMAT,{}}); // FIXME: Invalid shape instead? 
op.outs[out_port] = RcDesc{gm.rc, gm.shape, {}}; + + return eh; } std::vector GModel::orderedInputs(const ConstGraph &g, ade::NodeHandle nh) @@ -206,26 +214,29 @@ ade::NodeHandle GModel::detail::dataNodeOf(const ConstLayoutGraph &g, const GOri return g.metadata().get().object_nodes.at(origin); } -void GModel::redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to) +std::vector GModel::redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to) { std::vector ehh(from->outEdges().begin(), from->outEdges().end()); + std::vector ohh; + ohh.reserve(ehh.size()); for (auto e : ehh) { auto dst = e->dstNode(); auto input = g.metadata(e).get(); g.erase(e); - linkIn(g, dst, to, input.port); + ohh.push_back(linkIn(g, dst, to, input.port)); } + return ohh; } -void GModel::redirectWriter(Graph &g, ade::NodeHandle from, ade::NodeHandle to) +ade::EdgeHandle GModel::redirectWriter(Graph &g, ade::NodeHandle from, ade::NodeHandle to) { GAPI_Assert(from->inEdges().size() == 1); auto e = from->inEdges().front(); auto op = e->srcNode(); auto output = g.metadata(e).get(); g.erase(e); - linkOut(g, op, to, output.port); + return linkOut(g, op, to, output.port); } GMetaArgs GModel::collectInputMeta(const GModel::ConstGraph &cg, ade::NodeHandle node) diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.hpp index 8f78ba49b7a..d016766fb50 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodel.hpp @@ -61,6 +61,7 @@ struct Op std::vector outs; // TODO: Introduce a new type for resource references cv::gapi::GBackend backend; + cv::util::any params; // Operation specific information }; struct Data @@ -210,6 +211,58 @@ struct CustomMetaFunction CM customOutMeta; }; +// This is a general flag indicating that this GModel has intrinsics. 
+// In the beginning of the compilation, it is a quick check to +// indicate there are intrinsics. +// +// In the end of the compilation, having this flag is fatal -- all +// intrinsics must be resolved. +struct HasIntrinsics +{ + static const char *name() { return "HasIntrinsicsFlag"; } +}; + +// This is a special tag for both DATA and OP nodes indicating +// which desynchronized path this node belongs to. +// This tag is set by a special complex pass intrinDesync/accept. +struct DesyncPath +{ + static const char *name() { return "DesynchronizedPath"; } + + // A zero-based index of the desynchronized path in the graph. + // Set by intrinDesync() compiler pass + int index; +}; + +// This is a special tag for graph Edges indicating that this +// particular edge starts a desynchronized path in the graph. +// At the execution stage, the data coming "through" these edges +// (virtually, of course, since our GModel edges never transfer the +// actual data, they just represent these transfers) is desynchronized +// from the rest of the pipeline, i.e. may be "lost" (stay unconsumed +// and then overwritten with some new data when streaming). +struct DesyncEdge +{ + static const char *name() { return "DesynchronizedEdge"; } + + // A zero-based index of the desynchronized path in the graph. + // Set by intrinDesync/apply() compiler pass + int index; +}; + +// This flag marks the island graph as "desynchronized" +struct Desynchronized +{ + static const char *name() { return "Desynchronized"; } +}; + +// Reference to compile args of the computation +struct CompileArgs +{ + static const char *name() { return "CompileArgs"; } + GCompileArgs args; +}; + namespace GModel { using Graph = ade::TypedGraph @@ -231,6 +284,11 @@ namespace GModel , CustomMetaFunction , Streaming , Deserialized + , HasIntrinsics + , DesyncPath + , DesyncEdge + , Desynchronized + , CompileArgs >; // FIXME: How to define it based on GModel??? 
@@ -253,6 +311,11 @@ namespace GModel , CustomMetaFunction , Streaming , Deserialized + , HasIntrinsics + , DesyncPath + , DesyncEdge + , Desynchronized + , CompileArgs >; // FIXME: @@ -262,7 +325,11 @@ namespace GModel // GAPI_EXPORTS for tests GAPI_EXPORTS void init (Graph& g); - GAPI_EXPORTS ade::NodeHandle mkOpNode(Graph &g, const GKernel &k, const std::vector& args, const std::string &island); + GAPI_EXPORTS ade::NodeHandle mkOpNode(Graph &g, + const GKernel &k, + const std::vector& args, + const cv::util::any& params, + const std::string &island); // Isn't used by the framework or default backends, required for external backend development GAPI_EXPORTS ade::NodeHandle mkDataNode(Graph &g, const GShape shape); @@ -273,11 +340,11 @@ namespace GModel // Clears logged messages of a node. GAPI_EXPORTS void log_clear(Graph &g, ade::NodeHandle node); - GAPI_EXPORTS void linkIn (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t in_port); - GAPI_EXPORTS void linkOut (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t out_port); + GAPI_EXPORTS ade::EdgeHandle linkIn (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t in_port); + GAPI_EXPORTS ade::EdgeHandle linkOut (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t out_port); - GAPI_EXPORTS void redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to); - GAPI_EXPORTS void redirectWriter (Graph &g, ade::NodeHandle from, ade::NodeHandle to); + GAPI_EXPORTS std::vector redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to); + GAPI_EXPORTS ade::EdgeHandle redirectWriter (Graph &g, ade::NodeHandle from, ade::NodeHandle to); GAPI_EXPORTS std::vector orderedInputs (const ConstGraph &g, ade::NodeHandle nh); GAPI_EXPORTS std::vector orderedOutputs(const ConstGraph &g, ade::NodeHandle nh); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodelbuilder.cpp 
b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodelbuilder.cpp index 87e9ab55b8c..5f8f3518fce 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodelbuilder.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gmodelbuilder.cpp @@ -134,12 +134,19 @@ cv::gimpl::Unrolled cv::gimpl::unrollExpr(const GProtoArgs &ins, // Put the outputs object description of the node // so that they are not lost if they are not consumed by other operations + GAPI_Assert(call_p.m_k.outCtors.size() == call_p.m_k.outShapes.size()); for (const auto &it : ade::util::indexed(call_p.m_k.outShapes)) { std::size_t port = ade::util::index(it); GShape shape = ade::util::value(it); - GOrigin org { shape, node, port, {}, origin.kind }; + // FIXME: then use ZIP + HostCtor ctor = call_p.m_k.outCtors[port]; + + // NB: Probably this fixes all other "missing host ctor" + // problems. + // TODO: Clean-up the old workarounds if it really is. + GOrigin org {shape, node, port, std::move(ctor), origin.kind}; origins.insert(org); } @@ -286,7 +293,7 @@ ade::NodeHandle cv::gimpl::GModelBuilder::put_OpNode(const cv::GNode &node) { GAPI_Assert(node.shape() == GNode::NodeShape::CALL); const auto &call_p = node.call().priv(); - auto nh = cv::gimpl::GModel::mkOpNode(m_gm, call_p.m_k, call_p.m_args, node_p.m_island); + auto nh = cv::gimpl::GModel::mkOpNode(m_gm, call_p.m_k, call_p.m_args, call_p.m_params, node_p.m_island); m_graph_ops[&node_p] = nh; return nh; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gobjref.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gobjref.hpp index dd0939c439c..bca6fa525e4 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gobjref.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gobjref.hpp @@ -16,15 +16,9 @@ namespace cv namespace gimpl { - // Union type for various user-defined type constructors (GArray, GOpaque, etc) - // FIXME: Replace 
construct-only API with a more generic one - // (probably with bits of introspection) - // Not required for non-user-defined types (GMat, GScalar, etc) - using HostCtor = util::variant - < util::monostate - , detail::ConstructVec - , detail::ConstructOpaque - >; + // HostCtor was there, but then moved to public + // Redeclare here to avoid changing tons of code + using HostCtor = cv::detail::HostCtor; using ConstVal = util::variant < util::monostate diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming.cpp index 2e9c016ceb8..fa736d592eb 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming.cpp @@ -8,6 +8,7 @@ #include "precomp.hpp" #include +#include // util::indexed #include // can_describe #include @@ -69,6 +70,11 @@ bool cv::GStreamingCompiled::Priv::pull(cv::GRunArgsP &&outs) return m_exec->pull(std::move(outs)); } +bool cv::GStreamingCompiled::Priv::pull(cv::GOptRunArgsP &&outs) +{ + return m_exec->pull(std::move(outs)); +} + bool cv::GStreamingCompiled::Priv::try_pull(cv::GRunArgsP &&outs) { return m_exec->try_pull(std::move(outs)); @@ -111,6 +117,58 @@ bool cv::GStreamingCompiled::pull(cv::GRunArgsP &&outs) return m_priv->pull(std::move(outs)); } +std::tuple cv::GStreamingCompiled::pull() +{ + // FIXME: Why it is not @ priv?? 
+ GRunArgs run_args; + GRunArgsP outs; + const auto& out_info = m_priv->outInfo(); + run_args.reserve(out_info.size()); + outs.reserve(out_info.size()); + + for (auto&& info : out_info) + { + switch (info.shape) + { + case cv::GShape::GMAT: + { + run_args.emplace_back(cv::Mat{}); + outs.emplace_back(&cv::util::get(run_args.back())); + break; + } + case cv::GShape::GSCALAR: + { + run_args.emplace_back(cv::Scalar{}); + outs.emplace_back(&cv::util::get(run_args.back())); + break; + } + case cv::GShape::GARRAY: + { + switch (info.kind) + { + case cv::detail::OpaqueKind::CV_POINT2F: + run_args.emplace_back(cv::detail::VectorRef{std::vector{}}); + outs.emplace_back(cv::util::get(run_args.back())); + break; + default: + util::throw_error(std::logic_error("Unsupported kind for GArray")); + } + break; + } + default: + util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output")); + } + } + + bool is_over = m_priv->pull(std::move(outs)); + return std::make_tuple(is_over, run_args); +} + +bool cv::GStreamingCompiled::pull(cv::GOptRunArgsP &&outs) +{ + return m_priv->pull(std::move(outs)); +} + bool cv::GStreamingCompiled::try_pull(cv::GRunArgsP &&outs) { return m_priv->try_pull(std::move(outs)); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming_priv.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming_priv.hpp index 447bcda76e0..59b19d42526 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming_priv.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/gstreaming_priv.hpp @@ -20,7 +20,7 @@ namespace gimpl // FIXME: GAPI_EXPORTS is here only due to tests and Windows linker issues // FIXME: It seems it clearly duplicates the GStreamingCompiled and -// GStreamingExecutable APIs so is highly redundant now. +// GStreamingIntrinExecutable APIs so is highly redundant now. // Same applies to GCompiled/GCompiled::Priv/GExecutor. 
class GAPI_EXPORTS GStreamingCompiled::Priv { @@ -28,6 +28,10 @@ class GAPI_EXPORTS GStreamingCompiled::Priv GMetaArgs m_outMetas; // inferred by compiler std::unique_ptr m_exec; + // NB: Used by python wrapper to clarify input/output types + GTypesInfo m_out_info; + GTypesInfo m_in_info; + public: void setup(const GMetaArgs &metaArgs, const GMetaArgs &outMetas, @@ -41,10 +45,17 @@ public: void setSource(GRunArgs &&args); void start(); bool pull(cv::GRunArgsP &&outs); + bool pull(cv::GOptRunArgsP &&outs); bool try_pull(cv::GRunArgsP &&outs); void stop(); bool running() const; + + void setOutInfo(const GTypesInfo& info) { m_out_info = std::move(info); } + const GTypesInfo& outInfo() const { return m_out_info; } + + void setInInfo(const GTypesInfo& info) { m_in_info = std::move(info); } + const GTypesInfo& inInfo() const { return m_in_info; } }; } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/dump_dot.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/dump_dot.cpp index 15e6a9fa098..b7f5ea96d3f 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/dump_dot.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/dump_dot.cpp @@ -33,6 +33,7 @@ void dumpDot(const ade::Graph &g, std::ostream& os) {cv::GShape::GSCALAR, "GScalar"}, {cv::GShape::GARRAY, "GArray"}, {cv::GShape::GOPAQUE, "GOpaque"}, + {cv::GShape::GFRAME, "GFrame"}, }; auto format_op_label = [&gr](ade::NodeHandle nh) -> std::string { diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/exec.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/exec.cpp index 755538bb46e..f6a73489eb3 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/exec.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/exec.cpp @@ -20,6 +20,7 @@ #include // util::optional #include "logger.hpp" // GAPI_LOG +#include 
"api/gbackend_priv.hpp" // for canMerge() #include "compiler/gmodel.hpp" #include "compiler/gislandmodel.hpp" #include "compiler/passes/passes.hpp" @@ -54,11 +55,28 @@ namespace // Also check the cases backend can't handle // (e.x. GScalar connecting two fluid ops should split the graph) const GModel::ConstGraph g(src_graph); + if (g.metadata().contains()) { + // Fusion of a graph having a desynchronized path is + // definitely non-trivial + return false; + } const auto& active_backends = g.metadata().get().backends; - return active_backends.size() == 1 && - ade::util::all_of(g.nodes(), [&](ade::NodeHandle nh) { - return !g.metadata(nh).contains(); - }); + if (active_backends.size() != 1u) { + // More than 1 backend involved - non-trivial + return false; + } + const auto& has_island_tags = [&](ade::NodeHandle nh) { + return g.metadata(nh).contains(); + }; + if (ade::util::any_of(g.nodes(), has_island_tags)) { + // There are user-defined islands - non-trivial + return false; + } + if (active_backends.begin()->priv().controlsMerge()) { + // If the only backend controls Island Fusion on its own - non-trivial + return false; + } + return true; } void fuseTrivial(GIslandModel::Graph &g, const ade::Graph &src_graph) @@ -71,12 +89,12 @@ namespace all.insert(src_g.nodes().begin(), src_g.nodes().end()); - for (const auto nh : proto.in_nhs) + for (const auto& nh : proto.in_nhs) { all.erase(nh); in_ops.insert(nh->outNodes().begin(), nh->outNodes().end()); } - for (const auto nh : proto.out_nhs) + for (const auto& nh : proto.out_nhs) { all.erase(nh); out_ops.insert(nh->inNodes().begin(), nh->inNodes().end()); @@ -90,12 +108,12 @@ namespace auto ih = GIslandModel::mkIslandNode(g, std::move(isl)); - for (const auto nh : proto.in_nhs) + for (const auto& nh : proto.in_nhs) { auto slot = GIslandModel::mkSlotNode(g, nh); g.link(slot, ih); } - for (const auto nh : proto.out_nhs) + for (const auto& nh : proto.out_nhs) { auto slot = GIslandModel::mkSlotNode(g, nh); g.link(ih, slot); @@ 
-125,9 +143,9 @@ namespace }; bool canMerge(const GIslandModel::Graph &g, - const ade::NodeHandle a_nh, - const ade::NodeHandle /*slot_nh*/, - const ade::NodeHandle b_nh, + const ade::NodeHandle &a_nh, + const ade::NodeHandle &slot_nh, + const ade::NodeHandle &b_nh, const MergeContext &ctx = MergeContext()) { auto a_ptr = g.metadata(a_nh).get().object; @@ -142,8 +160,8 @@ namespace // Islands which cause a cycle can't be merged as well // (since the flag is set, the procedure already tried to // merge these islands in the past) - if (ade::util::contains(ctx.cycle_causers, std::make_pair(a_ptr, b_ptr))|| - ade::util::contains(ctx.cycle_causers, std::make_pair(b_ptr, a_ptr))) + if ( ade::util::contains(ctx.cycle_causers, std::make_pair(a_ptr, b_ptr)) + || ade::util::contains(ctx.cycle_causers, std::make_pair(b_ptr, a_ptr))) return false; // There may be user-defined islands. Initially user-defined @@ -163,7 +181,13 @@ namespace return false; } - // FIXME: add a backend-specified merge checker + // If available, run the backend-specified merge checker + const auto &this_backend_p = a_ptr->backend().priv(); + if ( this_backend_p.controlsMerge() + && !this_backend_p.allowsMerge(g, a_nh, slot_nh, b_nh)) + { + return false; + } return true; } @@ -205,10 +229,31 @@ namespace { using namespace std::placeholders; + // Before checking for candidates, find and ban neighbor nodes + // (input or outputs) which are connected via desynchronized + // edges. 
+ GIsland::node_set nodes_with_desync_edges; + for (const auto& in_eh : nh->inEdges()) { + if (g.metadata(in_eh).contains()) { + nodes_with_desync_edges.insert(in_eh->srcNode()); + } + } + for (const auto& output_data_nh : nh->outNodes()) { + for (const auto &out_reader_eh : output_data_nh->outEdges()) { + if (g.metadata(out_reader_eh).contains()) { + nodes_with_desync_edges.insert(out_reader_eh->dstNode()); + } + } + } + // Find a first matching candidate GIsland for merge // among inputs - for (const auto& input_data_nh : nh->inNodes()) + for (const auto& in_eh : nh->inEdges()) { + if (ade::util::contains(nodes_with_desync_edges, in_eh->srcNode())) { + continue; // desync edges can never be fused + } + const auto& input_data_nh = in_eh->srcNode(); if (input_data_nh->inNodes().size() != 0) { // Data node must have a single producer only @@ -224,14 +269,17 @@ namespace // Ok, now try to find it among the outputs for (const auto& output_data_nh : nh->outNodes()) { - auto mergeTest = [&](ade::NodeHandle cons_nh) -> bool { - return canMerge(g, nh, output_data_nh, cons_nh, ctx); + auto mergeTest = [&](ade::EdgeHandle cons_eh) -> bool { + if (ade::util::contains(nodes_with_desync_edges, cons_eh->dstNode())) { + return false; // desync edges can never be fused + } + return canMerge(g, nh, output_data_nh, cons_eh->dstNode(), ctx); }; - auto cand_it = std::find_if(output_data_nh->outNodes().begin(), - output_data_nh->outNodes().end(), + auto cand_it = std::find_if(output_data_nh->outEdges().begin(), + output_data_nh->outEdges().end(), mergeTest); - if (cand_it != output_data_nh->outNodes().end()) - return std::make_tuple(*cand_it, + if (cand_it != output_data_nh->outEdges().end()) + return std::make_tuple((*cand_it)->dstNode(), output_data_nh, Direction::Out); } // for(outNodes) @@ -251,6 +299,7 @@ namespace ade::NodeHandle m_slot; ade::NodeHandle m_cons; + using Change = ChangeT; Change::List m_changes; struct MergeObjects @@ -423,10 +472,10 @@ namespace auto backend = 
m_gim.metadata(m_prod).get() .object->backend(); auto merged = std::make_shared(backend, - std::move(mo.all), - std::move(mo.in_ops), - std::move(mo.out_ops), - std::move(maybe_user_tag)); + std::move(mo.all), + std::move(mo.in_ops), + std::move(mo.out_ops), + std::move(maybe_user_tag)); // FIXME: move this debugging to some user-controllable log-level #ifdef DEBUG_MERGE merged->debug(); @@ -440,7 +489,9 @@ namespace m_prod->inEdges().end()); for (auto in_edge : input_edges) { - m_changes.enqueue(m_g, in_edge->srcNode(), new_nh); + // FIXME: Introduce a Relink primitive instead? + // (combining the both actions into one?) + m_changes.enqueue(m_g, in_edge->srcNode(), new_nh, in_edge); m_changes.enqueue(m_g, m_prod, in_edge); } @@ -450,7 +501,7 @@ namespace m_cons->outEdges().end()); for (auto out_edge : output_edges) { - m_changes.enqueue(m_g, new_nh, out_edge->dstNode()); + m_changes.enqueue(m_g, new_nh, out_edge->dstNode(), out_edge); m_changes.enqueue(m_g, m_cons, out_edge); } @@ -491,6 +542,10 @@ namespace m_changes.enqueue(m_g, non_opt_slot_nh, eh); } } + // FIXME: No metadata copied here (from where??) + // For DesyncIslEdges it still works, as these tags are + // placed to Data->Op edges and this one is an Op->Data + // edge. 
m_changes.enqueue(m_g, new_nh, non_opt_slot_nh); } @@ -502,7 +557,7 @@ namespace m_prod->outEdges().end()); for (auto extra_out : prod_extra_out_edges) { - m_changes.enqueue(m_g, new_nh, extra_out->dstNode()); + m_changes.enqueue(m_g, new_nh, extra_out->dstNode(), extra_out); m_changes.enqueue(m_g, m_prod, extra_out); } @@ -514,7 +569,7 @@ namespace m_cons->inEdges().end()); for (auto extra_in : cons_extra_in_edges) { - m_changes.enqueue(m_g, extra_in->srcNode(), new_nh); + m_changes.enqueue(m_g, extra_in->srcNode(), new_nh, extra_in); m_changes.enqueue(m_g, m_cons, extra_in); } @@ -557,10 +612,10 @@ namespace there_was_a_merge = false; // FIXME: move this debugging to some user-controllable log level - #ifdef DEBUG_MERGE +#ifdef DEBUG_MERGE GAPI_LOG_INFO(NULL, "Before next merge attempt " << iteration << "..."); merge_debug(g, iteration); - #endif +#endif iteration++; auto sorted = pass_helpers::topoSort(im); for (auto nh : sorted) @@ -600,9 +655,9 @@ namespace "merge(" << l_obj->name() << "," << r_obj->name() << ") was successful!"); action.commit(); - #ifdef DEBUG_MERGE +#ifdef DEBUG_MERGE GIslandModel::syncIslandTags(gim, g); - #endif +#endif there_was_a_merge = true; break; // start do{}while from the beginning } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/intrin.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/intrin.cpp new file mode 100644 index 00000000000..56f2db69e03 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/intrin.cpp @@ -0,0 +1,305 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + + +#include "precomp.hpp" + +#include +#include +#include // GDesync intrinsic + +#include "compiler/gmodel.hpp" +#include "compiler/passes/passes.hpp" + +namespace desync { +namespace { + +// Drop the desynchronized node `nh` from the graph, reconnect the +// graph structure properly. This is a helper function which is used +// in both drop(g) and apply(g) passes. +// +// @return a vector of new edge handles connecting the "main" graph +// with its desynchronized part. +std::vector drop(cv::gimpl::GModel::Graph &g, + ade::NodeHandle nh) { + using namespace cv::gimpl; + + // What we need to do here: + // 1. Connect the readers of its produced data objects + // to the input data objects of desync; + // 2. Drop the data object it produces. + // 3. Drop the desync operation itself; + std::vector in_data_objs = GModel::orderedInputs(g, nh); + std::vector out_data_objs = GModel::orderedOutputs(g, nh); + std::vector new_links; + GAPI_Assert(in_data_objs.size() == out_data_objs.size()); + GAPI_DbgAssert(ade::util::all_of + (out_data_objs, + [&](const ade::NodeHandle &oh) { + return g.metadata(oh).contains(); + })); + // (1) + for (auto &&it: ade::util::zip(ade::util::toRange(in_data_objs), + ade::util::toRange(out_data_objs))) { + auto these_new_links = GModel::redirectReaders(g, + std::get<1>(it), + std::get<0>(it)); + new_links.insert(new_links.end(), + these_new_links.begin(), + these_new_links.end()); + } + // (2) + for (auto &&old_out_nh : out_data_objs) { + g.erase(old_out_nh); + } + // (3) + g.erase(nh); + + return new_links; +} + +// Tracing a desynchronizing subgraph is somewhat tricky and happens +// in both directions: downwards and upwards. +// +// The downward process is the basic one: we start with a "desync" +// OP node and go down to the graph using the "output" edges. We check +// if all nodes on this path [can] belong to this desynchronized path +// and don't overlap with others. 
+// +// An important contract to maintain is that the desynchronized part +// can't have any input references from the "main" graph part or any +// other desynchronized part in the graph. This contract is validated +// by checking every node's input which must belong to the same +// desynchronized part. +// +// Here is the pitfall of this check: +// +// v +// GMat_0 +// v +// +----------+ +// | desync() | <- This point originates the traceDown process +// +----------+ +// v +// GMat_0' <- This node will be tagged for this desync at +// :--------. step 0/1 +// v : <- The order how output nodes are visited is not +// +----------+ : specified, we can visit Op2() first (as there +// | Op1() | : is a direct link) bypassing visiting and tagging +// +----------+ : Op1() and GMat_1 +// v : +// GMat_1 : +// : .---' +// v v <- When we visit Op2() via the 2nd edge on this +// +----------+ graph, we check if all inputs belong to the same +// | Op2() | desynchronized graph and GMat_1 fails this check +// +----------+ (since the traceDown() process haven't visited +// it yet). +// +// Cases like this originate the traceUp() process: if we find an +// input node in our desynchronized path which doesn't belong to this +// path YET, it is not 100% a problem, and we need to trace it back +// (upwards) to see if it is really a case. + +// This recursive function checks the desync_id in the graph upwards. +// The process doesn't continue for nodes which have a valid +// desync_id already. +// The process only continues for nodes which have no desync_id +// assigned. If there's no such nodes anymore, the procedure is +// considered complete and a list of nodes to tag is returned to the +// caller. +// +// If NO inputs of this node have a valid desync_id, the desync +// invariant is broken and the function throws. 
+void traceUp(cv::gimpl::GModel::Graph &g, + const ade::NodeHandle &nh, + int desync_id, + std::vector &path) { + using namespace cv::gimpl; + + GAPI_Assert(!nh->inNodes().empty() + && "traceUp: a desynchronized part of the graph is not isolated?"); + + if (g.metadata(nh).contains()) { + // We may face nodes which have DesyncPath already visited during + // this recursive process (e.g. via some other output or branch in the + // subgraph) + if (g.metadata(nh).get().index != desync_id) { + GAPI_Assert(false && "Desynchronization can't be nested!"); + } + return; // This object belongs to the desync path - exit early. + } + + // Regardless of the result, put this nh to the path + path.push_back(nh); + + // Check if the input nodes are OK + std::vector nodes_to_trace; + nodes_to_trace.reserve(nh->inNodes().size()); + for (auto &&in_nh : nh->inNodes()) { + if (g.metadata(in_nh).contains()) { + // We may face nodes which have DesyncPath already visited during + // this recursive process (e.g. via some other output or branch in the + // subgraph) + GAPI_Assert(g.metadata(in_nh).get().index == desync_id + && "Desynchronization can't be nested!"); + } else { + nodes_to_trace.push_back(in_nh); + } + } + + // If there are nodes to trace, continue the recursion + for (auto &&up_nh : nodes_to_trace) { + traceUp(g, up_nh, desync_id, path); + } +} + +// This recursive function propagates the desync_id down to the graph +// starting at nh, and also checks: +// - if this desync path is isolated; +// - if this desync path is not overlapped. +// It also originates the traceUp() process at the points of +// uncertainty (as described in the comment above). +void traceDown(cv::gimpl::GModel::Graph &g, + const ade::NodeHandle &nh, + int desync_id) { + using namespace cv::gimpl; + + if (g.metadata(nh).contains()) { + // We may face nodes which have DesyncPath already visited during + // this recursive process (e.g. 
via some other output or branch in the + // subgraph) + GAPI_Assert(g.metadata(nh).get().index == desync_id + && "Desynchronization can't be nested!"); + } else { + g.metadata(nh).set(DesyncPath{desync_id}); + } + + // All inputs of this data object must belong to the same + // desync path. + for (auto &&in_nh : nh->inNodes()) { + // If an input object is not assigned to this desync path, + // it does not means that the object doesn't belong to + // this path. Check it. + std::vector path_up; + traceUp(g, in_nh, desync_id, path_up); + // We get here on success. Just set the proper tags for + // the identified input path. + for (auto &&up_nh : path_up) { + g.metadata(up_nh).set(DesyncPath{desync_id}); + } + } + + // Propagate the tag & check down + for (auto &&out_nh : nh->outNodes()) { + traceDown(g, out_nh, desync_id); + } +} + +// Streaming case: ensure the graph has proper isolation of the +// desynchronized parts, set proper Edge metadata hints for +// GStreamingIntrinExecutable +void apply(cv::gimpl::GModel::Graph &g) { + using namespace cv::gimpl; + + // Stage 0. Trace down the desync operations in the graph. + // Tag them with their unique (per graph) identifiers. + int total_desync = 0; + for (auto &&nh : g.nodes()) { + if (g.metadata(nh).get().t == NodeType::OP) { + const auto &op = g.metadata(nh).get(); + if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) { + GAPI_Assert(!g.metadata(nh).contains() + && "Desynchronization can't be nested!"); + const int this_desync_id = total_desync++; + g.metadata(nh).set(DesyncPath{this_desync_id}); + for (auto &&out_nh: nh->outNodes()) { + traceDown(g, out_nh, this_desync_id); + } + } // if (desync) + } // if(OP) + } // for(nodes) + + // Tracing is done for all desync ops in the graph now. + // Stage 1. Drop the desync operations from the graph, but mark + // the desynchronized edges a special way. 
+ // The desynchronized edge is the edge which connects a main + // subgraph data with a desynchronized subgraph data. + std::vector nodes(g.nodes().begin(), g.nodes().end()); + for (auto &&nh : nodes) { + if (nh == nullptr) { + // Some nodes could be dropped already during the procedure + // thanks ADE their NodeHandles updated automatically + continue; + } + if (g.metadata(nh).get().t == NodeType::OP) { + const auto &op = g.metadata(nh).get(); + if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) { + auto index = g.metadata(nh).get().index; + auto new_links = drop(g, nh); + for (auto &&eh : new_links) { + g.metadata(eh).set(DesyncEdge{index}); + } + } // if (desync) + } // if (Op) + } // for(nodes) + + // Stage 2. Put a synchronized tag if there were changes applied + if (total_desync > 0) { + g.metadata().set(Desynchronized{}); + } +} + +// Probably the simplest case: desync makes no sense in the regular +// compilation process, so just drop all its occurences in the graph, +// reconnecting nodes properly. +void drop(cv::gimpl::GModel::Graph &g) { + // FIXME: LOG here that we're dropping the desync operations as + // they have no sense when compiling in the regular mode. + using namespace cv::gimpl; + std::vector nodes(g.nodes().begin(), g.nodes().end()); + for (auto &&nh : nodes) { + if (nh == nullptr) { + // Some nodes could be dropped already during the procedure + // thanks ADE their NodeHandles updated automatically + continue; + } + if (g.metadata(nh).get().t == NodeType::OP) { + const auto &op = g.metadata(nh).get(); + if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) { + drop(g, nh); + } // if (desync) + } // if (Op) + } // for(nodes) +} + +} // anonymous namespace +} // namespace desync + +void cv::gimpl::passes::intrinDesync(ade::passes::PassContext &ctx) { + GModel::Graph gr(ctx.graph); + if (!gr.metadata().contains()) + return; + + gr.metadata().contains() + ? 
desync::apply(gr) // Streaming compilation + : desync::drop(gr); // Regular compilation +} + +// Clears the HasIntrinsics flag if all intrinsics have been handled. +void cv::gimpl::passes::intrinFinalize(ade::passes::PassContext &ctx) { + GModel::Graph gr(ctx.graph); + for (auto &&nh : gr.nodes()) { + if (gr.metadata(nh).get().t == NodeType::OP) { + const auto &op = gr.metadata(nh).get(); + if (is_intrinsic(op.k.name)) { + return; + } + } + } + // If reached here, really clear the flag + gr.metadata().erase(); +} diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/kernels.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/kernels.cpp index 69b339fb1ed..837e21f19a8 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/kernels.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/kernels.cpp @@ -14,6 +14,7 @@ #include // compound::backend() #include // GKernelPackage #include // GNetPackage +#include // GDesync intrinsic #include "compiler/gmodel.hpp" #include "compiler/passes/passes.hpp" @@ -24,6 +25,20 @@ #include "logger.hpp" // GAPI_LOG #include "api/gproto_priv.hpp" // is_dynamic, rewrap +namespace +{ + // FIXME: This may be not the right design choice, but so far it works + const std::vector known_intrinsics = { + cv::gapi::streaming::detail::GDesync::id() + }; +} +bool cv::gimpl::is_intrinsic(const std::string &s) { + // FIXME: This search might be better in time once we start using string + return std::find(known_intrinsics.begin(), + known_intrinsics.end(), + s) != known_intrinsics.end(); +} + namespace { struct ImplInfo @@ -126,12 +141,18 @@ void cv::gimpl::passes::bindNetParams(ade::passes::PassContext &ctx, continue; pgr.metadata(nh).set(NetworkParams{it->params}); + op.backend = it->backend; } } } -// This pass, given the kernel package, selects a kernel implementation -// for every operation in the graph +// This pass, given the kernel package, selects a 
kernel +// implementation for every operation in the graph +// +// Starting OpenCV 4.3, G-API may have some special "intrinsic" +// operations. Those can be implemented by backends as regular +// kernels, but if not, they are handled by the framework itself in +// its optimization/execution passes. void cv::gimpl::passes::resolveKernels(ade::passes::PassContext &ctx, const gapi::GKernelPackage &kernels) { @@ -142,14 +163,44 @@ void cv::gimpl::passes::resolveKernels(ade::passes::PassContext &ctx, { if (gr.metadata(nh).get().t == NodeType::OP) { + // If the operation is known to be intrinsic and is NOT + // implemented in the package, just skip it - there should + // be some pass which handles it. auto &op = gr.metadata(nh).get(); - cv::gapi::GBackend selected_backend; - cv::GKernelImpl selected_impl; - std::tie(selected_backend, selected_impl) = kernels.lookup(op.k.name); + if (is_intrinsic(op.k.name) && !kernels.includesAPI(op.k.name)) { + gr.metadata().set(HasIntrinsics{}); + continue; + } + // FIXME: And this logic is terribly wrong. The right + // thing is to assign an intrinsic to a particular island + // if and only if it is: + // (a) surrounded by nodes of backend X, AND + // (b) is supported by backend X. + // Here we may have multiple backends supporting an + // intrinsic but only one of those gets selected. And + // this is exactly a situation we need multiple versions + // of the same kernel to be presented in the kernel + // package (as it was designed originally). 
- selected_backend.priv().unpackKernel(ctx.graph, nh, selected_impl); - op.backend = selected_backend; - active_backends.insert(selected_backend); + cv::GKernelImpl selected_impl; + + if (op.backend == cv::gapi::GBackend()) { + std::tie(op.backend, selected_impl) = kernels.lookup(op.k.name); + } else { + // FIXME: This needs to be reworked properly + // Lookup for implementation from the pre-assinged backend + cv::gapi::GBackend dummy; + std::tie(dummy, selected_impl) = op.backend.priv() + .auxiliaryKernels().lookup(op.k.name); + // FIXME: Warning here! + // This situation may happen when NN (infer) backend was assigned + // by tag in bindNetParams (see above) but at this stage the operation + // lookup resulted in another backend (and it is perfectly valid when + // we have multiple NN backends available). + } + + op.backend.priv().unpackKernel(ctx.graph, nh, selected_impl); + active_backends.insert(op.backend); if (gr.metadata().contains()) { @@ -181,6 +232,12 @@ void cv::gimpl::passes::expandKernels(ade::passes::PassContext &ctx, const gapi: if (gr.metadata(nh).get().t == NodeType::OP) { const auto& op = gr.metadata(nh).get(); + // FIXME: Essentially the same problem as in the above resolveKernels + if (is_intrinsic(op.k.name) && !kernels.includesAPI(op.k.name)) { + // Note: There's no need to set HasIntrinsics flag here + // since resolveKernels would do it later. 
+ continue; + } cv::gapi::GBackend selected_backend; cv::GKernelImpl selected_impl; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/passes.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/passes.hpp index 84142fc0554..8f187f6bb75 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/passes.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/passes.hpp @@ -31,7 +31,11 @@ namespace gapi { struct GNetPackage; } // namespace gapi -namespace gimpl { namespace passes { +namespace gimpl { + +bool is_intrinsic(const std::string &op_name); + +namespace passes { void dumpDot(const ade::Graph &g, std::ostream& os); void dumpDot(ade::passes::PassContext &ctx, std::ostream& os); @@ -66,6 +70,9 @@ void applyTransformations(ade::passes::PassContext &ctx, void addStreaming(ade::passes::PassContext &ctx); +void intrinDesync(ade::passes::PassContext &ctx); +void intrinFinalize(ade::passes::PassContext &ctx); + }} // namespace gimpl::passes } // namespace cv diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/streaming.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/streaming.cpp index 6e982e25538..9d5dd713c40 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/streaming.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/passes/streaming.cpp @@ -32,7 +32,7 @@ namespace cv { namespace gimpl { namespace passes { * connected to a new "Sink" node which becomes its _consumer_. * * These extra nodes are required to streamline the queues - * initialization by the GStreamingExecutable and its derivatives. + * initialization by the GStreamingIntrinExecutable and its derivatives. 
*/ void addStreaming(ade::passes::PassContext &ctx) { diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/transactions.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/transactions.hpp index 54af8a6e69a..bdc1723e197 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/transactions.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/compiler/transactions.hpp @@ -14,6 +14,7 @@ #include +#include "opencv2/gapi/util/util.hpp" // Seq #include "opencv2/gapi/own/assert.hpp" enum class Direction: int {Invalid, In, Out}; @@ -21,8 +22,50 @@ enum class Direction: int {Invalid, In, Out}; //////////////////////////////////////////////////////////////////////////// //// // TODO: Probably it can be moved to ADE +template +class Preserved +{ + using S = typename cv::detail::MkSeq::type; + std::tuple...> m_data; -namespace Change + template + cv::util::optional get(ade::ConstTypedGraph g, H h) { + return g.metadata(h).template contains() + ? 
cv::util::make_optional(g.metadata(h).template get()) + : cv::util::optional{}; + } + template + int set(ade::TypedGraph &g, H &h) { + const auto &opt = std::get(m_data); + if (opt.has_value()) + g.metadata(h).set(opt.value()); + return 0; + } + template + void copyTo_impl(ade::TypedGraph &g, H h, cv::detail::Seq) { + int unused[] = {0, set(g, h)...}; + (void) unused; + } +public: + Preserved(const ade::Graph &g, H h) { + ade::ConstTypedGraph tg(g); + m_data = std::make_tuple(get(tg, h)...); + } + void copyTo(ade::Graph &g, H h) { + ade::TypedGraph tg(g); + copyTo_impl(tg, h, S{}); + } +}; +// Do nothing if there's no metadata +template +class Preserved { +public: + Preserved(const ade::Graph &, H) {} + void copyTo(ade::Graph &, H) {} +}; + +template +struct ChangeT { struct Base { @@ -31,6 +74,8 @@ namespace Change virtual ~Base() = default; }; + template using Preserved = ::Preserved; + class NodeCreated final: public Base { ade::NodeHandle m_node; @@ -39,11 +84,7 @@ namespace Change virtual void rollback(ade::Graph &g) override { g.erase(m_node); } }; - // NB: Drops all metadata stored in the EdgeHandle, - // which is not restored even in the rollback - - // FIXME: either add a way for users to preserve meta manually - // or extend ADE to manipulate with meta such way + // FIXME: maybe extend ADE to clone/copy the whole metadata? class DropLink final: public Base { ade::NodeHandle m_node; @@ -51,13 +92,15 @@ namespace Change ade::NodeHandle m_sibling; + Preserved m_meta; + public: DropLink(ade::Graph &g, const ade::NodeHandle &node, const ade::EdgeHandle &edge) - : m_node(node), m_dir(node == edge->srcNode() - ? Direction::Out - : Direction::In) + : m_node(node) + , m_dir(node == edge->srcNode() ? Direction::Out : Direction::In) + , m_meta(g, edge) { m_sibling = (m_dir == Direction::In ? edge->srcNode() @@ -67,12 +110,17 @@ namespace Change virtual void rollback(ade::Graph &g) override { + // FIXME: Need to preserve metadata here! 
+ // GIslandModel edges now have metadata + ade::EdgeHandle eh; switch(m_dir) { - case Direction::In: g.link(m_sibling, m_node); break; - case Direction::Out: g.link(m_node, m_sibling); break; + case Direction::In: eh = g.link(m_sibling, m_node); break; + case Direction::Out: eh = g.link(m_node, m_sibling); break; default: GAPI_Assert(false); } + GAPI_Assert(eh != nullptr); + m_meta.copyTo(g, eh); } }; @@ -82,10 +130,15 @@ namespace Change public: NewLink(ade::Graph &g, - const ade::NodeHandle &prod, - const ade::NodeHandle &cons) + const ade::NodeHandle &prod, + const ade::NodeHandle &cons, + const ade::EdgeHandle ©_from = ade::EdgeHandle()) : m_edge(g.link(prod, cons)) { + if (copy_from != nullptr) + { + Preserved(g, copy_from).copyTo(g, m_edge); + } } virtual void rollback(ade::Graph &g) override @@ -141,7 +194,7 @@ namespace Change } } }; -} // namespace Change +}; // struct Change //////////////////////////////////////////////////////////////////////////// #endif // OPENCV_GAPI_COMPILER_TRANSACTIONS_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/conc_queue.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/conc_queue.hpp index 5de50ef34bb..9875e8245a8 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/conc_queue.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/conc_queue.hpp @@ -119,8 +119,7 @@ void concurrent_bounded_queue::set_capacity(std::size_t capacity) { // Clear the queue. Similar to the TBB version, this method is not // thread-safe. 
template -void concurrent_bounded_queue::clear() -{ +void concurrent_bounded_queue::clear() { m_data = std::queue{}; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gapi_itt.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gapi_itt.hpp new file mode 100644 index 00000000000..2ab3237e7f6 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gapi_itt.hpp @@ -0,0 +1,59 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_GAPI_ITT_HPP +#define OPENCV_GAPI_GAPI_ITT_HPP + +//for ITT_NAMED_TRACE_GUARD +#include +#include + +// FIXME: It seems that this macro is not propagated here by the OpenCV cmake (as this is not core module). +// (Consider using OpenCV's trace.hpp ) +#ifdef OPENCV_WITH_ITT +#include +#endif + +#include +namespace cv { +namespace util { + template< class T > + using remove_reference_t = typename std::remove_reference::type; + + // Home brew ScopeGuard + // D will be called automatically with p as argument when ScopeGuard goes out of scope. 
+ // call release() on the ScopeGuard object to revoke guard action + template + auto make_ptr_guard(T* p, D&& d) -> std::unique_ptr> { + return {p, std::forward(d)}; + } +} // namespace util + +// FIXME: make it more reusable (and move to other place and other namespace) +namespace gimpl { namespace parallel { + #ifdef OPENCV_WITH_ITT + extern const __itt_domain* gapi_itt_domain; + + namespace { + auto make_itt_guard = [](__itt_string_handle* h) { + __itt_task_begin(gapi_itt_domain, __itt_null, __itt_null, (h)); + return util::make_ptr_guard(reinterpret_cast(1), [](int* ) { __itt_task_end(gapi_itt_domain); }); + }; + } // namespace + + #define GAPI_ITT_NAMED_TRACE_GUARD(name, h) auto name = cv::gimpl::parallel::make_itt_guard(h); cv::util::suppress_unused_warning(name) + #else + struct dumb_guard {void reset(){}}; + #define GAPI_ITT_NAMED_TRACE_GUARD(name, h) cv::gimpl::parallel::dumb_guard name; cv::util::suppress_unused_warning(name) + #endif + + #define GAPI_ITT_AUTO_TRACE_GUARD_IMPL_(LINE, h) GAPI_ITT_NAMED_TRACE_GUARD(itt_trace_guard_##LINE, h) + #define GAPI_ITT_AUTO_TRACE_GUARD_IMPL(LINE, h) GAPI_ITT_AUTO_TRACE_GUARD_IMPL_(LINE, h) + #define GAPI_ITT_AUTO_TRACE_GUARD(h) GAPI_ITT_AUTO_TRACE_GUARD_IMPL(__LINE__, h) +}} //gimpl::parallel +} //namespace cv + +#endif /* OPENCV_GAPI_GAPI_ITT_HPP */ diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gasync.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gasync.cpp index b92dbdcec44..902e6e16248 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gasync.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gasync.cpp @@ -11,6 +11,8 @@ #include #include +#include + #include #include @@ -18,16 +20,6 @@ #include #include -namespace { - //This is a tool to move initialize captures of a lambda in C++11 - template - struct copy_through_move{ - T value; - copy_through_move(T&& g) : value(std::move(g)) {} - copy_through_move(copy_through_move&&) = 
default; - copy_through_move(copy_through_move const& lhs) : copy_through_move(std::move(const_cast(lhs))) {} - }; -} namespace cv { namespace gapi { @@ -168,7 +160,7 @@ const char* GAsyncCanceled::what() const noexcept { //For now these async functions are simply wrapping serial version of apply/operator() into a functor. //These functors are then serialized into single queue, which is processed by a devoted background thread. void async_apply(GComputation& gcomp, std::function&& callback, GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args){ - //TODO: use copy_through_move for all args except gcomp + //TODO: use copy_through_move_t for all args except gcomp //TODO: avoid code duplication between versions of "async" functions auto l = [=]() mutable { auto apply_l = [&](){ @@ -181,7 +173,7 @@ void async_apply(GComputation& gcomp, std::function&& } std::future async_apply(GComputation& gcomp, GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args){ - copy_through_move> prms{{}}; + util::copy_through_move_t> prms{{}}; auto f = prms.value.get_future(); auto l = [=]() mutable { auto apply_l = [&](){ @@ -196,7 +188,7 @@ std::future async_apply(GComputation& gcomp, GRunArgs &&ins, GRunArgsP &&o } void async_apply(GComputation& gcomp, std::function&& callback, GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args, GAsyncContext& ctx){ - //TODO: use copy_through_move for all args except gcomp + //TODO: use copy_through_move_t for all args except gcomp auto l = [=, &ctx]() mutable { auto apply_l = [&](){ gcomp.apply(std::move(ins), std::move(outs), std::move(args)); @@ -208,7 +200,7 @@ void async_apply(GComputation& gcomp, std::function&& } std::future async_apply(GComputation& gcomp, GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args, GAsyncContext& ctx){ - copy_through_move> prms{{}}; + util::copy_through_move_t> prms{{}}; auto f = prms.value.get_future(); auto l = [=, &ctx]() mutable { auto apply_l = [&](){ @@ -248,7 +240,7 @@ void async(GCompiled& gcmpld, 
std::function&& callback } std::future async(GCompiled& gcmpld, GRunArgs &&ins, GRunArgsP &&outs){ - copy_through_move> prms{{}}; + util::copy_through_move_t> prms{{}}; auto f = prms.value.get_future(); auto l = [=]() mutable { auto apply_l = [&](){ @@ -263,7 +255,7 @@ std::future async(GCompiled& gcmpld, GRunArgs &&ins, GRunArgsP &&outs){ } std::future async(GCompiled& gcmpld, GRunArgs &&ins, GRunArgsP &&outs, GAsyncContext& ctx){ - copy_through_move> prms{{}}; + util::copy_through_move_t> prms{{}}; auto f = prms.value.get_future(); auto l = [=, &ctx]() mutable { auto apply_l = [&](){ diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.cpp index eb5ac27d21d..66f3b247710 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.cpp @@ -12,6 +12,8 @@ #include #include + +#include "api/gproto_priv.hpp" // ptr(GRunArgP) #include "executor/gexecutor.hpp" #include "compiler/passes/passes.hpp" @@ -72,7 +74,7 @@ cv::gimpl::GExecutor::GExecutor(std::unique_ptr &&g_model) const auto orig_data_nh = m_gim.metadata(nh).get().original_data_node; // (1) - initResource(orig_data_nh); + initResource(nh, orig_data_nh); m_slots.emplace_back(DataDesc{nh, orig_data_nh}); } break; @@ -84,7 +86,104 @@ cv::gimpl::GExecutor::GExecutor(std::unique_ptr &&g_model) } // for(gim nodes) } -void cv::gimpl::GExecutor::initResource(const ade::NodeHandle &orig_nh) +namespace cv { +namespace gimpl { +namespace magazine { +namespace { + +void bindInArgExec(Mag& mag, const RcDesc &rc, const GRunArg &arg) +{ + if (rc.shape != GShape::GMAT) + { + bindInArg(mag, rc, arg); + return; + } + auto& mag_rmat = mag.template slot()[rc.id]; + switch (arg.index()) + { + case GRunArg::index_of() : + mag_rmat = make_rmat(util::get(arg)); break; + case GRunArg::index_of() : + mag_rmat = util::get(arg); break; 
+ default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); + } + // FIXME: has to take extra care about meta here for this particuluar + // case, just because this function exists at all + mag.meta()[rc.id] = arg.meta; +} + +void bindOutArgExec(Mag& mag, const RcDesc &rc, const GRunArgP &arg) +{ + if (rc.shape != GShape::GMAT) + { + bindOutArg(mag, rc, arg); + return; + } + auto& mag_rmat = mag.template slot()[rc.id]; + switch (arg.index()) + { + case GRunArgP::index_of() : + mag_rmat = make_rmat(*util::get(arg)); break; + case GRunArgP::index_of() : + mag_rmat = *util::get(arg); break; + default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); + } +} + +cv::GRunArgP getObjPtrExec(Mag& mag, const RcDesc &rc) +{ + if (rc.shape != GShape::GMAT) + { + return getObjPtr(mag, rc); + } + return GRunArgP(&mag.slot()[rc.id]); +} + +void writeBackExec(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg) +{ + if (rc.shape != GShape::GMAT) + { + writeBack(mag, rc, g_arg); + return; + } + auto checkOutArgData = [&](const uchar* out_arg_data) { + //simply check that memory was not reallocated, i.e. 
+ //both Mat and View pointing to the same memory + auto mag_data = mag.template slot().at(rc.id).get()->data(); + GAPI_Assert((out_arg_data == mag_data) && " data for output parameters was reallocated ?"); + }; + + switch (g_arg.index()) + { + case GRunArgP::index_of() : checkOutArgData(util::get(g_arg)->data); break; + case GRunArgP::index_of() : /* do nothing */ break; + default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); + } +} + +void assignMetaStubExec(Mag& mag, const RcDesc &rc, const cv::GRunArg::Meta &meta) { + switch (rc.shape) + { + case GShape::GARRAY: mag.meta()[rc.id] = meta; break; + case GShape::GOPAQUE: mag.meta()[rc.id] = meta; break; + case GShape::GSCALAR: mag.meta()[rc.id] = meta; break; + case GShape::GFRAME: mag.meta()[rc.id] = meta; break; + case GShape::GMAT: + mag.meta() [rc.id] = meta; + mag.meta()[rc.id] = meta; +#if !defined(GAPI_STANDALONE) + mag.meta()[rc.id] = meta; +#endif + break; + default: util::throw_error(std::logic_error("Unsupported GShape type")); break; + } +} + +} // anonymous namespace +}}} // namespace cv::gimpl::magazine + + +void cv::gimpl::GExecutor::initResource(const ade::NodeHandle & nh, const ade::NodeHandle &orig_nh) { const Data &d = m_gm.metadata(orig_nh).get(); @@ -99,9 +198,19 @@ void cv::gimpl::GExecutor::initResource(const ade::NodeHandle &orig_nh) { case GShape::GMAT: { + // Let island allocate it's outputs if it can, + // allocate cv::Mat and wrap it with RMat otherwise + GAPI_Assert(!nh->inNodes().empty()); const auto desc = util::get(d.meta); - auto& mat = m_res.slot()[d.rc]; - createMat(desc, mat); + auto& exec = m_gim.metadata(nh->inNodes().front()).get().object; + auto& rmat = m_res.slot()[d.rc]; + if (exec->allocatesOutputs()) { + rmat = exec->allocate(desc); + } else { + Mat mat; + createMat(desc, mat); + rmat = make_rmat(mat); + } } break; @@ -146,11 +255,28 @@ public: class cv::gimpl::GExecutor::Output final: public 
cv::gimpl::GIslandExecutable::IOutput { cv::gimpl::Mag &mag; - virtual GRunArgP get(int idx) override { return magazine::getObjPtr(mag, desc()[idx]); } - virtual void post(GRunArgP&&) override { } // Do nothing here - virtual void post(EndOfStream&&) override {} // Do nothing here too + std::unordered_map out_idx; + + GRunArgP get(int idx) override + { + auto r = magazine::getObjPtrExec(mag, desc()[idx]); + // Remember the output port for this output object + out_idx[cv::gimpl::proto::ptr(r)] = idx; + return r; + } + void post(GRunArgP&&) override { } // Do nothing here + void post(EndOfStream&&) override {} // Do nothing here too + void meta(const GRunArgP &out, const GRunArg::Meta &m) override + { + const auto idx = out_idx.at(cv::gimpl::proto::ptr(out)); + magazine::assignMetaStubExec(mag, desc()[idx], m); + } public: - Output(cv::gimpl::Mag &m, const std::vector &rcs) : mag(m) { set(rcs); } + Output(cv::gimpl::Mag &m, const std::vector &rcs) + : mag(m) + { + set(rcs); + } }; void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) @@ -186,11 +312,10 @@ void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) using cv::util::get; const auto desc = get(d.meta); - auto check_own_mat = [&desc, &args, &index]() + auto check_rmat = [&desc, &args, &index]() { - auto& out_mat = *get(args.outObjs.at(index)); - GAPI_Assert(out_mat.data != nullptr && - desc.canDescribe(out_mat)); + auto& out_mat = *get(args.outObjs.at(index)); + GAPI_Assert(desc.canDescribe(out_mat)); }; #if !defined(GAPI_STANDALONE) @@ -202,15 +327,25 @@ void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) auto& out_mat = *get(args.outObjs.at(index)); createMat(desc, out_mat); } - // In the case of own::Mat never reallocated, checked to perfectly fit required meta + // In the case of RMat check to fit required meta else { - check_own_mat(); + check_rmat(); } #else // Building standalone - output buffer should always exist, // and _exact_ match our inferred metadata - 
check_own_mat(); + if (cv::util::holds_alternative(args.outObjs.at(index))) + { + auto& out_mat = *get(args.outObjs.at(index)); + GAPI_Assert(out_mat.data != nullptr && + desc.canDescribe(out_mat)); + } + // In the case of RMat check to fit required meta + else + { + check_rmat(); + } #endif // !defined(GAPI_STANDALONE) } } @@ -218,12 +353,12 @@ void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) for (auto it : ade::util::zip(ade::util::toRange(proto.inputs), ade::util::toRange(args.inObjs))) { - magazine::bindInArg(m_res, std::get<0>(it), std::get<1>(it)); + magazine::bindInArgExec(m_res, std::get<0>(it), std::get<1>(it)); } for (auto it : ade::util::zip(ade::util::toRange(proto.outputs), ade::util::toRange(args.outObjs))) { - magazine::bindOutArg(m_res, std::get<0>(it), std::get<1>(it)); + magazine::bindOutArgExec(m_res, std::get<0>(it), std::get<1>(it)); } // Reset internal data @@ -236,7 +371,7 @@ void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) // Run the script for (auto &op : m_ops) { - // (5) + // (5), (6) Input i{m_res, op.in_objects}; Output o{m_res, op.out_objects}; op.isl_exec->run(i, o); @@ -246,7 +381,7 @@ void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) for (auto it : ade::util::zip(ade::util::toRange(proto.outputs), ade::util::toRange(args.outObjs))) { - magazine::writeBack(m_res, std::get<0>(it), std::get<1>(it)); + magazine::writeBackExec(m_res, std::get<0>(it), std::get<1>(it)); } } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.hpp index d4fe96e5b16..5d797ce6044 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gexecutor.hpp @@ -54,6 +54,7 @@ namespace gimpl { class GExecutor { protected: + Mag m_res; std::unique_ptr m_orig_graph; std::shared_ptr m_island_graph; @@ -80,9 +81,7 @@ protected: class 
Input; class Output; - Mag m_res; - - void initResource(const ade::NodeHandle &orig_nh); // FIXME: shouldn't it be RcDesc? + void initResource(const ade::NodeHandle &nh, const ade::NodeHandle &orig_nh); // FIXME: shouldn't it be RcDesc? public: explicit GExecutor(std::unique_ptr &&g_model); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.cpp index 1f9af29cc99..cfb4a527dfe 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.cpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.cpp @@ -6,12 +6,17 @@ #include "precomp.hpp" +#include // make_shared #include #include #include +#if !defined(GAPI_STANDALONE) +#include // GCopy -- FIXME - to be removed! +#endif // GAPI_STANDALONE + #include "api/gproto_priv.hpp" // ptr(GRunArgP) #include "compiler/passes/passes.hpp" #include "backends/common/gbackend.hpp" // createMat @@ -60,14 +65,27 @@ public: struct DataQueue { static const char *name() { return "StreamingDataQueue"; } + enum tag { DESYNC }; // Enum of 1 element: purely a syntax sugar explicit DataQueue(std::size_t capacity) { - if (capacity) { - q.set_capacity(capacity); + // Note: `ptr` is shared, while the `q` is a shared + auto ptr = std::make_shared(); + if (capacity != 0) { + ptr->set_capacity(capacity); } + q = std::move(ptr); + } + explicit DataQueue(tag t) + : q(new cv::gimpl::stream::DesyncQueue()) { + GAPI_Assert(t == DESYNC); } - cv::gimpl::stream::Q q; + // FIXME: ADE metadata requires types to be copiable + std::shared_ptr q; +}; + +struct DesyncSpecialCase { + static const char *name() { return "DesyncSpecialCase"; } }; std::vector reader_queues( ade::Graph &g, @@ -77,7 +95,7 @@ std::vector reader_queues( ade::Graph &g, std::vector result; for (auto &&out_eh : obj->outEdges()) { - result.push_back(&qgr.metadata(out_eh).get().q); + 
result.push_back(qgr.metadata(out_eh).get().q.get()); } return result; } @@ -90,7 +108,7 @@ std::vector input_queues( ade::Graph &g, for (auto &&in_eh : obj->inEdges()) { result.push_back(qgr.metadata(in_eh).contains() - ? &qgr.metadata(in_eh).get().q + ? qgr.metadata(in_eh).get().q.get() : nullptr); } return result; @@ -109,7 +127,13 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs) switch (out_obj.index()) { case T::index_of(): - *cv::util::get(out_obj) = std::move(cv::util::get(res_obj)); + { + auto out_mat_p = cv::util::get(out_obj); + auto view = cv::util::get(res_obj).access(cv::RMat::Access::R); + *out_mat_p = cv::gimpl::asMat(view).clone(); + } break; + case T::index_of(): + *cv::util::get(out_obj) = std::move(cv::util::get(res_obj)); break; case T::index_of(): *cv::util::get(out_obj) = std::move(cv::util::get(res_obj)); @@ -120,6 +144,9 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs) case T::index_of(): cv::util::get(out_obj).mov(cv::util::get(res_obj)); break; + case T::index_of(): + *cv::util::get(out_obj) = std::move(cv::util::get(res_obj)); + break; default: GAPI_Assert(false && "This value type is not supported!"); // ...maybe because of STANDALONE mode. break; @@ -127,6 +154,77 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs) } } +// FIXME: Is there a way to derive function from its GRunArgsP version? 
+template using O = cv::util::optional; +void sync_data(cv::gimpl::stream::Result &r, cv::GOptRunArgsP &outputs) +{ + namespace own = cv::gapi::own; + + for (auto && it : ade::util::zip(ade::util::toRange(outputs), + ade::util::toRange(r.args), + ade::util::toRange(r.flags))) + { + auto &out_obj = std::get<0>(it); + auto &res_obj = std::get<1>(it); + bool available = std::get<2>(it); + + using T = cv::GOptRunArgP; +#define HANDLE_CASE(Type) \ + case T::index_of*>(): \ + if (available) { \ + *cv::util::get*>(out_obj) \ + = cv::util::make_optional(std::move(cv::util::get(res_obj))); \ + } else { \ + cv::util::get*>(out_obj)->reset(); \ + } + + // FIXME: this conversion should be unified + switch (out_obj.index()) + { + HANDLE_CASE(cv::Scalar); break; + HANDLE_CASE(cv::RMat); break; + + case T::index_of*>(): { + // Mat: special handling. + auto &mat_opt = *cv::util::get*>(out_obj); + if (available) { + auto q_map = cv::util::get(res_obj).access(cv::RMat::Access::R); + // FIXME: Copy! Maybe we could do some optimization for this case! + // e.g. don't handle RMat for last ilsand in the graph. + // It is not always possible though. + mat_opt = cv::util::make_optional(cv::gimpl::asMat(q_map).clone()); + } else { + mat_opt.reset(); + } + } break; + case T::index_of(): { + // std::vector<>: special handling + auto &vec_opt = cv::util::get(out_obj); + if (available) { + vec_opt.mov(cv::util::get(res_obj)); + } else { + vec_opt.reset(); + } + } break; + case T::index_of(): { + // std::vector<>: special handling + auto &opq_opt = cv::util::get(out_obj); + if (available) { + opq_opt.mov(cv::util::get(res_obj)); + } else { + opq_opt.reset(); + } + } break; + default: + // ...maybe because of STANDALONE mode. + GAPI_Assert(false && "This value type is not supported!"); + break; + } + } +#undef HANDLE_CASE +} + + // Pops an item from every input queue and combine it to the final // result. Blocks the current thread. 
Returns true if the vector has // been obtained successfully and false if a Stop message has been @@ -200,12 +298,39 @@ class QueueReader bool m_finishing = false; // Set to true once a "soft" stop is received std::vector m_cmd; + void rewindToStop(std::vector &in_queues, + const std::size_t this_id); + public: - bool getInputVector(std::vector &in_queues, - cv::GRunArgs &in_constants, - cv::GRunArgs &isl_inputs); + bool getInputVector (std::vector &in_queues, + cv::GRunArgs &in_constants, + cv::GRunArgs &isl_inputs); + + bool getResultsVector(std::vector &in_queues, + const std::vector &in_mapping, + const std::size_t out_size, + cv::GRunArgs &out_results); }; +// This method handles a stop sign got from some input +// island. Reiterate through all _remaining valid_ queues (some of +// them can be set to nullptr already -- see handling in +// getInputVector) and rewind data to every Stop sign per queue. +void QueueReader::rewindToStop(std::vector &in_queues, + const std::size_t this_id) +{ + for (auto &&qit : ade::util::indexed(in_queues)) + { + auto id2 = ade::util::index(qit); + auto &q2 = ade::util::value(qit); + if (this_id == id2) continue; + + Cmd cmd; + while (q2 && !cv::util::holds_alternative(cmd)) + q2->pop(cmd); + } +} + bool QueueReader::getInputVector(std::vector &in_queues, cv::GRunArgs &in_constants, cv::GRunArgs &isl_inputs) @@ -228,16 +353,14 @@ bool QueueReader::getInputVector(std::vector &in_queues, // value-initialized scalar) // It can also hold a constant value received with // Stop::Kind::CNST message (see above). 
- // FIXME: Variant move problem - isl_inputs[id] = const_cast(in_constants[id]); + isl_inputs[id] = in_constants[id]; continue; } q->pop(m_cmd[id]); if (!cv::util::holds_alternative(m_cmd[id])) { - // FIXME: Variant move problem - isl_inputs[id] = const_cast(cv::util::get(m_cmd[id])); + isl_inputs[id] = cv::util::get(m_cmd[id]); } else // A Stop sign { @@ -260,25 +383,12 @@ bool QueueReader::getInputVector(std::vector &in_queues, // NEXT time (on a next call to getInputVector()), the // "q==nullptr" check above will be triggered, but now // we need to make it manually: - isl_inputs[id] = const_cast(in_constants[id]); + isl_inputs[id] = in_constants[id]; } else { GAPI_Assert(stop.kind == Stop::Kind::HARD); - // Just got a stop sign. Reiterate through all - // _remaining valid_ queues (some of them can be - // set to nullptr already -- see above) and rewind - // data to every Stop sign per queue - for (auto &&qit : ade::util::indexed(in_queues)) - { - auto id2 = ade::util::index(qit); - auto &q2 = ade::util::value(qit); - if (id == id2) continue; - - Cmd cmd2; - while (q2 && !cv::util::holds_alternative(cmd2)) - q2->pop(cmd2); - } + rewindToStop(in_queues, id); // After queues are read to the proper indicator, // indicate end-of-stream return false; @@ -297,6 +407,60 @@ bool QueueReader::getInputVector(std::vector &in_queues, return true; // A regular case - there is data to process. } +// This is a special method to obtain a result vector +// for the entire pipeline's outputs. +// +// After introducing desync(), the pipeline output's vector +// can be produced just partially. Also, if a desynchronized +// path has multiple outputs for the pipeline, _these_ outputs +// should still come synchronized to the end user (via pull()) +// +// +// This method handles all this. +// It takes a number of input queues, which may or may not be +// equal to the number of pipeline outputs (<=). 
+// It also takes indexes saying which queue produces which +// output in the resulting pipeline. +// +// `out_results` is always produced with the size of full output +// vector. In the desync case, the number of in_queues will +// be less than this size and some of the items won't be produced. +// In the sync case, there will be a 1-1 mapping. +// +// In the desync case, there _will be_ multiple collector threads +// calling this method, and pushing their whole-pipeline outputs +// (_may be_ partially filled) to the same final output queue. +// The receiver part at the GStreamingExecutor level won't change +// because of that. +bool QueueReader::getResultsVector(std::vector &in_queues, + const std::vector &in_mapping, + const std::size_t out_size, + cv::GRunArgs &out_results) +{ + m_cmd.resize(out_size); + for (auto &&it : ade::util::indexed(in_queues)) + { + auto ii = ade::util::index(it); + auto oi = in_mapping[ii]; + auto &q = ade::util::value(it); + q->pop(m_cmd[oi]); + if (!cv::util::holds_alternative(m_cmd[oi])) + { + out_results[oi] = std::move(cv::util::get(m_cmd[oi])); + } + else // A Stop sign + { + // In theory, the CNST should never reach here. + // Collector thread never handles the inputs directly + // (collector's input queues are always produced by + // islands in the graph). + rewindToStop(in_queues, ii); + return false; + } // if(Stop) + } // for(in_queues) + return true; +} + // This thread is a plain dump source actor. 
What it do is just: // - Check input queue (the only one) for a control command @@ -408,6 +572,7 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput // These objects are owned externally const cv::GMetaArgs &m_metas; std::vector< std::vector > &m_out_queues; + std::shared_ptr m_island; // Allocate a new data object for output under idx // Prepare this object for posting @@ -430,10 +595,19 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput // FIXME: This is absolutely ugly but seem to work perfectly for its purpose. case cv::GShape::GMAT: { - MatType newMat; - cv::gimpl::createMat(cv::util::get(m_metas[idx]), newMat); - out_arg = cv::GRunArg(std::move(newMat)); - ret_val = cv::GRunArgP(&cv::util::get(out_arg)); + auto desc = cv::util::get(m_metas[idx]); + if (m_island->allocatesOutputs()) + { + out_arg = cv::GRunArg(m_island->allocate(desc)); + } + else + { + MatType newMat; + cv::gimpl::createMat(desc, newMat); + auto rmat = cv::make_rmat(newMat); + out_arg = cv::GRunArg(std::move(rmat)); + } + ret_val = cv::GRunArgP(&cv::util::get(out_arg)); } break; case cv::GShape::GSCALAR: @@ -465,6 +639,13 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput ret_val = cv::GRunArgP(rr); } break; + case cv::GShape::GFRAME: + { + cv::MediaFrame frame; + out_arg = cv::GRunArg(std::move(frame)); + ret_val = cv::GRunArgP(&cv::util::get(out_arg)); + } + break; default: cv::util::throw_error(std::logic_error("Unsupported GShape")); } @@ -493,8 +674,7 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput Cmd cmd; if (cv::util::holds_alternative(post_iter->data)) { - // FIXME: That ugly VARIANT problem - cmd = Cmd{const_cast(cv::util::get(post_iter->data))}; + cmd = Cmd{cv::util::get(post_iter->data)}; } else { @@ -504,8 +684,7 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput } for (auto &&q : m_out_queues[out_idx]) { - // FIXME: This ugly VARIANT problem - 
q->push(const_cast(cmd)); + q->push(cmd); } post_iter = m_postings[out_idx].erase(post_iter); } @@ -535,12 +714,23 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput } } } + void meta(const cv::GRunArgP &out, const cv::GRunArg::Meta &m) override + { + const auto it = m_postIdx.find(cv::gimpl::proto::ptr(out)); + GAPI_Assert(it != m_postIdx.end()); + + const auto out_iter = it->second.second; + cv::util::get(out_iter->data).meta = m; + } + public: explicit StreamingOutput(const cv::GMetaArgs &metas, std::vector< std::vector > &out_queues, - const std::vector &out_descs) + const std::vector &out_descs, + std::shared_ptr island) : m_metas(metas) , m_out_queues(out_queues) + , m_island(island) { set(out_descs); m_postings.resize(out_descs.size()); @@ -573,7 +763,7 @@ void islandActorThread(std::vector in_rcs, // GAPI_Assert(out_queues.size() == out_metas.size()); QueueReader qr; StreamingInput input(qr, in_queues, in_constants, in_rcs); - StreamingOutput output(out_metas, out_queues, out_rcs); + StreamingOutput output(out_metas, out_queues, out_rcs, island); while (!output.done()) { island->run(input, output); @@ -585,22 +775,84 @@ void islandActorThread(std::vector in_rcs, // // and then put the resulting vector into one single queue. While it // looks redundant, it simplifies dramatically the way how try_pull() // is implemented - we need to check one queue instead of many. -void collectorThread(std::vector in_queues, - Q& out_queue) +// +// After desync() is added, there may be multiple collector threads +// running, every thread producing its own part of the partial +// pipeline output (optional...). All partial outputs are pushed +// to the same output queue and then picked by GStreamingExecutor +// in the end. 
+void collectorThread(std::vector in_queues, + std::vector in_mapping, + const std::size_t out_size, + const bool handle_stop, + Q& out_queue) { + // These flags are static now: regardless if the sync or + // desync branch is collected by this thread, all in_queue + // data should come in sync. + std::vector flags(out_size, false); + for (auto idx : in_mapping) { + flags[idx] = true; + } + QueueReader qr; while (true) { - cv::GRunArgs this_result(in_queues.size()); - cv::GRunArgs this_const(in_queues.size()); - if (!qr.getInputVector(in_queues, this_const, this_result)) + cv::GRunArgs this_result(out_size); + const bool ok = qr.getResultsVector(in_queues, in_mapping, out_size, this_result); + if (!ok) { - out_queue.push(Cmd{Stop{}}); + if (handle_stop) + { + out_queue.push(Cmd{Stop{}}); + } + // Terminate the thread anyway return; } - out_queue.push(Cmd{this_result}); + out_queue.push(Cmd{Result{std::move(this_result), flags}}); } } + +void check_DesyncObjectConsumedByMultipleIslands(const cv::gimpl::GIslandModel::Graph &gim) { + using namespace cv::gimpl; + + // Since the limitation exists only in this particular + // implementation, the check is also done only here but not at the + // graph compiler level. + // + // See comment in desync(GMat) src/api/kernels_streaming.cpp for details. 
+ for (auto &&nh : gim.nodes()) { + if (gim.metadata(nh).get().k == NodeKind::SLOT) { + // SLOTs are read by ISLANDs, so look for the metadata + // of the outbound edges + std::unordered_map out_desync_islands; + for (auto &&out_eh : nh->outEdges()) { + if (gim.metadata(out_eh).contains()) { + // This is a desynchronized edge + // Look what Island it leads to + const auto out_desync_idx = gim.metadata(out_eh) + .get().index; + const auto out_island = gim.metadata(out_eh->dstNode()) + .get().object; + + auto it = out_desync_islands.find(out_desync_idx); + if (it != out_desync_islands.end()) { + // If there's already an edge with this desync + // id, it must point to the same island object + GAPI_Assert(it->second == out_island.get() + && "A single desync object may only be used by a single island!"); + } else { + // Store the island pointer for the further check + out_desync_islands[out_desync_idx] = out_island.get(); + } + } // if(desync) + } // for(out_eh) + // There must be only one backend in the end of the day + // (under this desync path) + } // if(SLOT) + } // for(nodes) +} + } // anonymous namespace // GStreamingExecutor expects compile arguments as input to have possibility to do @@ -612,20 +864,28 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && .get().model) , m_comp_args(comp_args) , m_gim(*m_island_graph) + , m_desync(GModel::Graph(*m_orig_graph).metadata() + .contains()) { GModel::Graph gm(*m_orig_graph); // NB: Right now GIslandModel is acyclic, and all the below code assumes that. 
- // NB: This naive execution code is taken from GExecutor nearly "as-is" + // NB: This naive execution code is taken from GExecutor nearly + // "as-is" + + if (m_desync) { + check_DesyncObjectConsumedByMultipleIslands(m_gim); + } const auto proto = gm.metadata().get(); m_emitters .resize(proto.in_nhs.size()); m_emitter_queues.resize(proto.in_nhs.size()); m_sinks .resize(proto.out_nhs.size()); - m_sink_queues .resize(proto.out_nhs.size()); + m_sink_queues .resize(proto.out_nhs.size(), nullptr); + m_sink_sync .resize(proto.out_nhs.size(), -1); // Very rough estimation to limit internal queue sizes. // Pipeline depth is equal to number of its (pipeline) steps. - const auto queue_capacity = std::count_if + const auto queue_capacity = 3*std::count_if (m_gim.nodes().begin(), m_gim.nodes().end(), [&](ade::NodeHandle nh) { @@ -705,15 +965,53 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && , isl_exec }); // Initialize queues for every operation's input - ade::TypedGraph qgr(*m_island_graph); + ade::TypedGraph qgr(*m_island_graph); + bool is_desync_start = false; for (auto eh : nh->inEdges()) { // ...only if the data is not compile-const if (const_ins.count(eh->srcNode()) == 0) { - qgr.metadata(eh).set(DataQueue(queue_capacity)); - m_internal_queues.insert(&qgr.metadata(eh).get().q); + if (m_gim.metadata(eh).contains()) { + qgr.metadata(eh).set(DataQueue(DataQueue::DESYNC)); + is_desync_start = true; + } else if (qgr.metadata(eh).contains()) { + // See comment below + // Limit queue size to 1 in this case + qgr.metadata(eh).set(DataQueue(1u)); + } else { + qgr.metadata(eh).set(DataQueue(queue_capacity)); + } + m_internal_queues.insert(qgr.metadata(eh).get().q.get()); } } + // WORKAROUND: + // Since now we always know desync() is followed by copy(), + // copy is always the island with DesyncIslEdge. 
+ // Mark the node's outputs a special way so then its following + // queue sizes will be limited to 1 (to avoid copy reading more + // data in advance - as there's no other way for the underlying + // "slow" part to control it) + if (is_desync_start) { + auto isl = m_gim.metadata(nh).get().object; + // In the current implementation, such islands + // _must_ start with copy + GAPI_Assert(isl->in_ops().size() == 1u); +#if !defined(GAPI_STANDALONE) + GAPI_Assert(GModel::Graph(*m_orig_graph) + .metadata(*isl->in_ops().begin()) + .get() + .k.name == cv::gapi::core::GCopy::id()); +#endif // GAPI_STANDALONE + for (auto out_nh : nh->outNodes()) { + for (auto out_eh : out_nh->outEdges()) { + qgr.metadata(out_eh).set(DesyncSpecialCase{}); + } + } + } + // It is ok to do it here since the graph is visited in + // a topologic order and its consumers (those checking + // their input edges & initializing queues) are yet to be + // visited } break; case NodeKind::SLOT: @@ -742,7 +1040,14 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && ade::TypedGraph qgr(*m_island_graph); GAPI_Assert(nh->inEdges().size() == 1u); qgr.metadata(nh->inEdges().front()).set(DataQueue(queue_capacity)); - m_sink_queues[sink_idx] = &qgr.metadata(nh->inEdges().front()).get().q; + m_sink_queues[sink_idx] = qgr.metadata(nh->inEdges().front()).get().q.get(); + + // Assign a desync tag + const auto sink_out_nh = gm.metadata().get().out_nhs[sink_idx]; + if (gm.metadata(sink_out_nh).contains()) { + // metadata().get_or<> could make this thing better + m_sink_sync[sink_idx] = gm.metadata(sink_out_nh).get().index; + } } break; default: @@ -750,7 +1055,23 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && break; } // switch(kind) } // for(gim nodes) - m_out_queue.set_capacity(queue_capacity); + + // If there are desynchronized parts in the graph, there may be + // multiple theads polling every separate (desynchronized) + // branch in the graph individually. 
Prepare a mapping information + // for any such thread + for (auto &&idx : ade::util::iota(m_sink_queues.size())) { + auto path_id = m_sink_sync[idx]; + auto &info = m_collector_map[path_id]; + info.queues.push_back(m_sink_queues[idx]); + info.mapping.push_back(static_cast(idx)); + } + + // Reserve space in the final queue based on the number + // of desync parts (they can generate output individually + // per the same input frame, so the output traffic multiplies) + GAPI_Assert(m_collector_map.size() > 0u); + m_out_queue.set_capacity(queue_capacity * m_collector_map.size()); } cv::gimpl::GStreamingExecutor::~GStreamingExecutor() @@ -920,7 +1241,6 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins) real_video_completion_cb); } - // Now do this for every island (in a topological order) for (auto &&op : m_ops) { @@ -956,10 +1276,27 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins) out_queues); } - // Finally, start a collector thread. - m_threads.emplace_back(collectorThread, - m_sink_queues, - std::ref(m_out_queue)); + // Finally, start collector thread(s). + // If there are desynchronized parts in the graph, there may be + // multiple theads polling every separate (desynchronized) + // branch in the graph individually. + const bool has_main_path = m_sink_sync.end() != + std::find(m_sink_sync.begin(), m_sink_sync.end(), -1); + for (auto &&info : m_collector_map) { + m_threads.emplace_back(collectorThread, + info.second.queues, + info.second.mapping, + m_sink_queues.size(), + has_main_path ? info.first == -1 : true, // see below (*) + std::ref(m_out_queue)); + + // (*) - there may be a problem with desynchronized paths when those work + // faster than the main path. In this case, the desync paths get "Stop" message + // earlier and thus broadcast it down to pipeline gets stopped when there is + // some "main path" data to process. 
This new collectorThread's flag regulates it: + // - desync paths should never post Stop message if there is a main path. + // - if there is no main path, than any desync path can terminate the execution. + } state = State::READY; } @@ -1000,15 +1337,25 @@ void cv::gimpl::GStreamingExecutor::wait_shutdown() for (auto &q : m_internal_queues) q->clear(); m_out_queue.clear(); + for (auto &&op : m_ops) { + op.isl_exec->handleStopStream(); + } + state = State::STOPPED; } bool cv::gimpl::GStreamingExecutor::pull(cv::GRunArgsP &&outs) { + // This pull() can only be called when there's no desynchronized + // parts in the graph. + GAPI_Assert(!m_desync && + "This graph has desynchronized parts! Please use another pull()"); + if (state == State::STOPPED) return false; GAPI_Assert(state == State::RUNNING); - GAPI_Assert(m_sink_queues.size() == outs.size()); + GAPI_Assert(m_sink_queues.size() == outs.size() && + "Number of data objects in cv::gout() must match the number of graph outputs in cv::GOut()"); Cmd cmd; m_out_queue.pop(cmd); @@ -1018,12 +1365,39 @@ bool cv::gimpl::GStreamingExecutor::pull(cv::GRunArgsP &&outs) return false; } - GAPI_Assert(cv::util::holds_alternative(cmd)); - cv::GRunArgs &this_result = cv::util::get(cmd); + GAPI_Assert(cv::util::holds_alternative(cmd)); + cv::GRunArgs &this_result = cv::util::get(cmd).args; sync_data(this_result, outs); return true; } +bool cv::gimpl::GStreamingExecutor::pull(cv::GOptRunArgsP &&outs) +{ + // This pull() can only be called in both cases: if there are + // desyncrhonized parts or not. + + // FIXME: so far it is a full duplicate of standard pull except + // the sync_data version called. 
+ if (state == State::STOPPED) + return false; + GAPI_Assert(state == State::RUNNING); + GAPI_Assert(m_sink_queues.size() == outs.size() && + "Number of data objects in cv::gout() must match the number of graph outputs in cv::GOut()"); + + Cmd cmd; + m_out_queue.pop(cmd); + if (cv::util::holds_alternative(cmd)) + { + wait_shutdown(); + return false; + } + + GAPI_Assert(cv::util::holds_alternative(cmd)); + sync_data(cv::util::get(cmd), outs); + return true; +} + + bool cv::gimpl::GStreamingExecutor::try_pull(cv::GRunArgsP &&outs) { if (state == State::STOPPED) @@ -1041,8 +1415,8 @@ bool cv::gimpl::GStreamingExecutor::try_pull(cv::GRunArgsP &&outs) return false; } - GAPI_Assert(cv::util::holds_alternative(cmd)); - cv::GRunArgs &this_result = cv::util::get(cmd); + GAPI_Assert(cv::util::holds_alternative(cmd)); + cv::GRunArgs &this_result = cv::util::get(cmd).args; sync_data(this_result, outs); return true; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.hpp index d10f9eddd09..b6093ac1ef3 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gstreamingexecutor.hpp @@ -14,6 +14,8 @@ #include // unique_ptr, shared_ptr #include // thread +#include +#include #if defined(HAVE_TBB) # include // FIXME: drop it from here! 
@@ -22,6 +24,7 @@ template using QueueClass = tbb::concurrent_bounded_queue; # include "executor/conc_queue.hpp" template using QueueClass = cv::gapi::own::concurrent_bounded_queue; #endif // TBB +#include "executor/last_value.hpp" #include @@ -40,14 +43,61 @@ struct Stop { cv::GRunArg cdata; // const data for CNST stop }; +struct Result { + cv::GRunArgs args; // Full results vector + std::vector flags; // Availability flags (in case of desync) +}; + using Cmd = cv::util::variant < cv::util::monostate , Start // Tells emitters to start working. Not broadcasted to workers. , Stop // Tells emitters to stop working. Broadcasted to workers. , cv::GRunArg // Workers data payload to process. - , cv::GRunArgs // Full results vector + , Result // Pipeline's data for gout() >; -using Q = QueueClass; + +// Interface over a queue. The underlying queue implementation may be +// different. This class is mainly introduced to bring some +// abstraction over the real queues (bounded in-order) and a +// desynchronized data slots (see required to implement +// cv::gapi::desync) + +class Q { +public: + virtual void push(const Cmd &cmd) = 0; + virtual void pop(Cmd &cmd) = 0; + virtual bool try_pop(Cmd &cmd) = 0; + virtual void clear() = 0; + virtual ~Q() = default; +}; + +// A regular queue implementation +class SyncQueue final: public Q { + QueueClass m_q; // FIXME: OWN or WRAP?? 
+ +public: + virtual void push(const Cmd &cmd) override { m_q.push(cmd); } + virtual void pop(Cmd &cmd) override { m_q.pop(cmd); } + virtual bool try_pop(Cmd &cmd) override { return m_q.try_pop(cmd); } + virtual void clear() override { m_q.clear(); } + + void set_capacity(std::size_t c) { m_q.set_capacity(c);} +}; + +// Desynchronized "queue" implementation +// Every push overwrites value which is not yet popped +// This container can hold 0 or 1 element +// Special handling for Stop is implemented (FIXME: not really) +class DesyncQueue final: public Q { + cv::gapi::own::last_written_value m_v; + +public: + virtual void push(const Cmd &cmd) override { m_v.push(cmd); } + virtual void pop(Cmd &cmd) override { m_v.pop(cmd); } + virtual bool try_pop(Cmd &cmd) override { return m_v.try_pop(cmd); } + virtual void clear() override { m_v.clear(); } +}; + } // namespace stream // FIXME: Currently all GExecutor comments apply also @@ -87,6 +137,7 @@ protected: util::optional m_reshapable; cv::gimpl::GIslandModel::Graph m_gim; // FIXME: make const? + const bool m_desync; // FIXME: Naive executor details are here for now // but then it should be moved to another place @@ -117,11 +168,27 @@ protected: std::vector m_sinks; std::vector m_threads; - std::vector m_emitter_queues; - std::vector m_const_emitter_queues; // a view over m_emitter_queues - std::vector m_sink_queues; - std::unordered_set m_internal_queues; - stream::Q m_out_queue; + std::vector m_emitter_queues; + + // a view over m_emitter_queues + std::vector m_const_emitter_queues; + + std::vector m_sink_queues; + + // desync path tags for outputs. 
-1 means that output + // doesn't belong to a desync path + std::vector m_sink_sync; + + std::unordered_set m_internal_queues; + stream::SyncQueue m_out_queue; + + // Describes mapping from desync paths to collector threads + struct CollectorThreadInfo { + std::vector queues; + std::vector mapping; + }; + std::unordered_map m_collector_map; + void wait_shutdown(); @@ -132,6 +199,7 @@ public: void setSource(GRunArgs &&args); void start(); bool pull(cv::GRunArgsP &&outs); + bool pull(cv::GOptRunArgsP &&outs); bool try_pull(cv::GRunArgsP &&outs); void stop(); bool running() const; diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gtbbexecutor.cpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gtbbexecutor.cpp new file mode 100644 index 00000000000..03c6757dc64 --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gtbbexecutor.cpp @@ -0,0 +1,445 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include "gtbbexecutor.hpp" + +#if defined(HAVE_TBB) +#include "gapi_itt.hpp" + +#include +#include +#include "logger.hpp" // GAPI_LOG + +#include +#include // unique_ptr + +#include +#include + +#include + +#define ASSERT(expr) GAPI_DbgAssert(expr) + +#define LOG_INFO(tag, ...) GAPI_LOG_INFO(tag, __VA_ARGS__) +#define LOG_WARNING(tag, ...) GAPI_LOG_WARNING(tag, __VA_ARGS__) +#define LOG_DEBUG(tag, ...) 
GAPI_LOG_DEBUG(tag, __VA_ARGS__) + + +#ifdef OPENCV_WITH_ITT +const __itt_domain* cv::gimpl::parallel::gapi_itt_domain = __itt_domain_create("GAPI Context"); +#endif + +namespace cv { namespace gimpl { namespace parallel { + +namespace detail { +// some helper staff to deal with tbb::task related entities +namespace tasking { + +enum class use_tbb_scheduler_bypass { + NO, + YES +}; + +inline void assert_graph_is_running(tbb::task* root) { + // tbb::task::wait_for_all block calling thread until task ref_count is dropped to 1 + // So if the root task ref_count is greater than 1 graph still has a job to do and + // according wait_for_all() has not yet returned + ASSERT(root->ref_count() > 1); +} + +// made template to break circular dependencies +template +struct functor_task : tbb::task { + body_t body; + + template + functor_task(arg_t&& a) : body(std::forward(a)) {} + + tbb::task * execute() override { + assert_graph_is_running(parent()); + + auto reuse_current_task = body(); + // if needed, say TBB to execute current task once again + return (use_tbb_scheduler_bypass::YES == reuse_current_task) ? 
(recycle_as_continuation(), this) : nullptr; + } + ~functor_task() { + assert_graph_is_running(parent()); + } +}; + +template +auto allocate_task(tbb::task* root, body_t const& body) -> functor_task* { + return new(tbb::task::allocate_additional_child_of(*root)) functor_task{body}; +} + +template +void spawn_no_assert(tbb::task* root, body_t const& body) { + tbb::task::spawn(* allocate_task(root, body)); +} + +#ifdef OPENCV_WITH_ITT +namespace { + static __itt_string_handle* ittTbbAddReadyBlocksToQueue = __itt_string_handle_create("add ready blocks to queue"); + static __itt_string_handle* ittTbbSpawnReadyBlocks = __itt_string_handle_create("spawn ready blocks"); + static __itt_string_handle* ittTbbEnqueueSpawnReadyBlocks = __itt_string_handle_create("enqueueing a spawn of ready blocks"); + static __itt_string_handle* ittTbbUnlockMasterThread = __itt_string_handle_create("Unlocking master thread"); +} +#endif // OPENCV_WITH_ITT + + +template +void batch_spawn(size_t count, tbb::task* root, body_t const& body, bool do_assert_graph_is_running = true) { + GAPI_ITT_AUTO_TRACE_GUARD(ittTbbSpawnReadyBlocks); + if (do_assert_graph_is_running) { + assert_graph_is_running(root); + } + + for (size_t i=0; i; + +root_t inline create_root(tbb::task_group_context& ctx) { + root_t root{new (tbb::task::allocate_root(ctx)) tbb::empty_task}; + root->set_ref_count(1); // required by wait_for_all, as it waits until counter drops to 1 + return root; +} + +std::size_t inline tg_context_traits() { + // Specify tbb::task_group_context::concurrent_wait in the traits to ask TBB scheduler not to change + // ref_count of the task we wait on (root) when wait is complete. 
+ return tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait; +} + +} // namespace tasking + +namespace async { +struct async_tasks_t { + std::atomic count {0}; + std::condition_variable cv; + std::mutex mtx; +}; + +enum class wake_tbb_master { + NO, + YES +}; + +void inline wake_master(async_tasks_t& async_tasks, wake_tbb_master wake_master) { + // TODO: seems that this can be relaxed + auto active_async_tasks = --async_tasks.count; + + if ((active_async_tasks == 0) || (wake_master == wake_tbb_master::YES)) { + // Was the last async task or asked to wake TBB master up(e.g. there are new TBB tasks to execute) + GAPI_ITT_AUTO_TRACE_GUARD(ittTbbUnlockMasterThread); + // While decrement of async_tasks_t::count is atomic, it might occur after the waiting + // thread has read its value but _before_ it actually starts waiting on the condition variable. + // So, lock acquire is needed to guarantee that current condition check (if any) in the waiting thread + // (possibly ran in parallel to async_tasks_t::count decrement above) is completed _before_ signal is issued. + // Therefore when notify_one is called, waiting thread is either sleeping on the condition variable or + // running a new check which is guaranteed to pick the new value and return from wait(). + + // There is no need to _hold_ the lock while signaling, only to acquire it. + std::unique_lock {async_tasks.mtx}; // Acquire and release the lock. 
+ async_tasks.cv.notify_one(); + } +} + +struct master_thread_sleep_lock_t +{ + struct sleep_unlock { + void operator()(async_tasks_t* t) const { + ASSERT(t); + wake_master(*t, wake_tbb_master::NO); + } + }; + + std::unique_ptr guard; + + master_thread_sleep_lock_t() = default; + master_thread_sleep_lock_t(async_tasks_t* async_tasks_ptr) : guard(async_tasks_ptr) { + // TODO: seems that this can be relaxed + ++(guard->count); + } + + void unlock(wake_tbb_master wake) { + if (auto* p = guard.release()) { + wake_master(*p, wake); + } + } +}; + +master_thread_sleep_lock_t inline lock_sleep_master(async_tasks_t& async_tasks) { + return {&async_tasks}; +} + +enum class is_tbb_work_present { + NO, + YES +}; + +//RAII object to block TBB master thread (one that does wait_for_all()) +//N.B. :wait_for_all() return control when root ref_count drops to 1, +struct root_wait_lock_t { + struct root_decrement_ref_count{ + void operator()(tbb::task* t) const { + ASSERT(t); + auto result = t->decrement_ref_count(); + ASSERT(result >= 1); + } + }; + + std::unique_ptr guard; + + root_wait_lock_t() = default; + root_wait_lock_t(tasking::root_t& root, is_tbb_work_present& previous_state) : guard{root.get()} { + // Block the master thread while the *this object is alive. + auto new_root_ref_count = root->add_ref_count(1); + previous_state = (new_root_ref_count == 2) ? 
is_tbb_work_present::NO : is_tbb_work_present::YES; + } + +}; + +root_wait_lock_t inline lock_wait_master(tasking::root_t& root, is_tbb_work_present& previous_state) { + return root_wait_lock_t{root, previous_state}; +} + +} // namespace async + +inline tile_node* pop(prio_items_queue_t& q) { + tile_node* node = nullptr; + bool popped = q.try_pop(node); + ASSERT(popped && "queue should be non empty as we push items to it before we spawn"); + return node; +} + +namespace graph { + // Returns : number of items actually pushed into the q + std::size_t inline push_ready_dependants(prio_items_queue_t& q, tile_node* node) { + GAPI_ITT_AUTO_TRACE_GUARD(ittTbbAddReadyBlocksToQueue); + std::size_t ready_items = 0; + // enable dependent tasks + for (auto* dependant : node->dependants) { + // fetch_and_sub returns previous value + if (1 == dependant->dependency_count.fetch_sub(1)) { + // tile node is ready for execution, add it to the queue + q.push(dependant); + ++ready_items; + } + } + return ready_items; + } + + struct exec_ctx { + tbb::task_arena& arena; + prio_items_queue_t& q; + tbb::task_group_context tg_ctx; + tasking::root_t root; + detail::async::async_tasks_t async_tasks; + std::atomic executed {0}; + + exec_ctx(tbb::task_arena& arena_, prio_items_queue_t& q_) + : arena(arena_), q(q_), + // As the traits is last argument, explicitly specify (default) value for first argument + tg_ctx{tbb::task_group_context::bound, tasking::tg_context_traits()}, + root(tasking::create_root(tg_ctx)) + {} + }; + + // At the moment there are no suitable tools to manage TBB priorities on task by task basis. + // Instead priority queue is used to respect tile_node priorities. + // As well, TBB task is not bound to any particular tile_node until actually executed. + + // Strictly speaking there are two graphs here: + // - G-API one, described by the connected tile_node instances. + // This graph is : + // - Known beforehand, and do not change during the execution (i.e. 
static) + // - Contains both TBB non-TBB parts + // - prioritized, (i.e. all nodes has assigned priority of execution) + // + // - TBB task tree, which is : + // - flat (Has only two levels : root and leaves) + // - dynamic, i.e. new leaves are added on demand when new tbb tasks are spawned + // - describes only TBB/CPU part of the whole graph + // - non-prioritized (i.e. all tasks are created equal) + + // Class below represents TBB task payload. + // + // Each instance basically does the three things : + // 1. Gets the tile_node item from the top of the queue + // 2. Executes its body + // 3. Pushes dependent tile_nodes to the queue once they are ready + // + struct task_body { + exec_ctx& ctx; + + std::size_t push_ready_dependants(tile_node* node) const { + return graph::push_ready_dependants(ctx.q, node); + } + + void spawn_clones(std::size_t items) const { + tasking::batch_spawn(items, ctx.root.get(), *this); + } + + task_body(exec_ctx& ctx_) : ctx(ctx_) {} + tasking::use_tbb_scheduler_bypass operator()() const { + ASSERT(!ctx.q.empty() && "Spawned task with no job to do ? 
"); + + tile_node* node = detail::pop(ctx.q); + + auto result = tasking::use_tbb_scheduler_bypass::NO; + // execute the task + + if (auto p = util::get_if(&(node->task_body))) { + // synchronous task + p->body(); + + std::size_t ready_items = push_ready_dependants(node); + + if (ready_items > 0) { + // spawn one less tasks and say TBB to reuse(recycle) current task + spawn_clones(ready_items - 1); + result = tasking::use_tbb_scheduler_bypass::YES; + } + } + else { + LOG_DEBUG(NULL, "Async task"); + using namespace detail::async; + using util::copy_through_move; + + auto block_master = copy_through_move(lock_sleep_master(ctx.async_tasks)); + + auto self_copy = *this; + auto callback = [node, block_master, self_copy] () mutable /*due to block_master.get().unlock()*/ { + LOG_DEBUG(NULL, "Async task callback is called"); + // Implicitly unlock master right in the end of callback + auto master_sleep_lock = std::move(block_master); + std::size_t ready_items = self_copy.push_ready_dependants(node); + if (ready_items > 0) { + auto master_was_active = is_tbb_work_present::NO; + { + GAPI_ITT_AUTO_TRACE_GUARD(ittTbbEnqueueSpawnReadyBlocks); + // Force master thread (one that does wait_for_all()) to (actively) wait for enqueued tasks + // and unlock it right after all dependent tasks are spawned. + + auto root_wait_lock = copy_through_move(lock_wait_master(self_copy.ctx.root, master_was_active)); + + // TODO: add test to cover proper holding of root_wait_lock + // As the calling thread most likely is not TBB one, instead of spawning TBB tasks directly we + // enqueue a task which will spawn them. + // For master thread to not leave wait_for_all() prematurely, + // hold the root_wait_lock until need tasks are actually spawned. + self_copy.ctx.arena.enqueue([ready_items, self_copy, root_wait_lock]() { + self_copy.spawn_clones(ready_items); + // TODO: why we need this? 
Either write a descriptive comment or remove it + volatile auto unused = root_wait_lock.get().guard.get(); + util::suppress_unused_warning(unused); + }); + } + // Wake master thread (if any) to pick up the enqueued tasks iff: + // 1. there is new TBB work to do, and + // 2. Master thread was sleeping on condition variable waiting for async tasks to complete + // (There was no active work before (i.e. root->ref_count() was == 1)) + auto wake_master = (master_was_active == is_tbb_work_present::NO) ? + wake_tbb_master::YES : wake_tbb_master::NO; + master_sleep_lock.get().unlock(wake_master); + } + }; + + auto& body = util::get(node->task_body).body; + body(std::move(callback), node->total_order_index); + } + + ctx.executed++; + // reset dependecy_count to initial state to simplify re-execution of the same graph + node->dependency_count = node->dependencies; + + return result; + } + }; +} +} // namespace detail +}}} // namespace cv::gimpl::parallel + +void cv::gimpl::parallel::execute(prio_items_queue_t& q) { + // get the reference to current task_arena (i.e. one we are running in) +#if TBB_INTERFACE_VERSION > 9002 + using attach_t = tbb::task_arena::attach; +#else + using attach_t = tbb::internal::attach; +#endif + + tbb::task_arena arena{attach_t{}}; + execute(q, arena); +} + +void cv::gimpl::parallel::execute(prio_items_queue_t& q, tbb::task_arena& arena) { + using namespace detail; + graph::exec_ctx ctx{arena, q}; + + arena.execute( + [&]() { + // Passed in queue is assumed to contain starting tasks, i.e. 
ones with no (or resolved) dependencies + auto num_start_tasks = q.size(); + + // TODO: use recursive spawning and task soft affinity for faster task distribution + // As graph is starting and no task has been spawned yet + // assert_graph_is_running(root) will not hold, so spawn without assert + tasking::batch_spawn(num_start_tasks, ctx.root.get(), graph::task_body{ctx}, /* assert_graph_is_running*/false); + + using namespace std::chrono; + high_resolution_clock timer; + + auto tbb_work_done = [&ctx]() { return 1 == ctx.root->ref_count(); }; + auto async_work_done = [&ctx]() { return 0 == ctx.async_tasks.count; }; + do { + // First participate in execution of TBB graph till there are no more ready tasks. + ctx.root->wait_for_all(); + + if (!async_work_done()) { // Wait on the conditional variable iff there is active async work + auto start = timer.now(); + std::unique_lock lk(ctx.async_tasks.mtx); + // Wait (probably by sleeping) until all async tasks are completed or new TBB tasks are created. + // FIXME: Use TBB resumable tasks here to avoid blocking TBB thread + ctx.async_tasks.cv.wait(lk, [&]{return async_work_done() || !tbb_work_done() ;}); + + LOG_INFO(NULL, "Slept for " << duration_cast(timer.now() - start).count() << " ms \n"); + } + } + while(!tbb_work_done() || !async_work_done()); + + ASSERT(tbb_work_done() && async_work_done() && "Graph is still running?"); + } + ); + + LOG_INFO(NULL, "Done. 
Executed " << ctx.executed << " tasks"); +} + +std::ostream& cv::gimpl::parallel::operator<<(std::ostream& o, tile_node const& n) { + o << "(" + << " at:" << &n << "," + << "indx: " << n.total_order_index << "," + << "deps #:" << n.dependency_count.value << ", " + << "prods:" << n.dependants.size(); + + o << "["; + for (auto* d: n.dependants) { + o << d << ","; + } + o << "]"; + + o << ")"; + return o; +} + +#endif // HAVE_TBB diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gtbbexecutor.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gtbbexecutor.hpp new file mode 100644 index 00000000000..8a62266f66c --- /dev/null +++ b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/gtbbexecutor.hpp @@ -0,0 +1,103 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_TBB_EXECUTOR_HPP +#define OPENCV_GAPI_TBB_EXECUTOR_HPP + +#if !defined(GAPI_STANDALONE) +#include +#endif + +#if defined(HAVE_TBB) + +#include +#include +#include +#include + +#include +#include + +#include + +namespace cv { namespace gimpl { namespace parallel { + +// simple wrapper to allow copies of std::atomic +template +struct atomic_copyable_wrapper { + std::atomic value; + + atomic_copyable_wrapper(count_t val) : value(val) {} + atomic_copyable_wrapper(atomic_copyable_wrapper const& lhs) : value (lhs.value.load(std::memory_order_relaxed)) {} + + atomic_copyable_wrapper& operator=(count_t val) { + value.store(val, std::memory_order_relaxed); + return *this; + } + + count_t fetch_sub(count_t val) { + return value.fetch_sub(val); + } + + count_t fetch_add(count_t val) { + return value.fetch_add(val); + } +}; + +struct async_tag {}; +constexpr async_tag async; + +// Class describing a piece of work in the node in the tasks graph. 
+// Most of the fields are set only once during graph compilation and never changes. +// (However at the moment they can not be made const due to two phase initialization +// of the tile_node objects) +// FIXME: refactor the code to make the const? +struct tile_node { + // place in totally ordered queue of tasks to execute. Inverse to priority, i.e. + // lower index means higher priority + size_t total_order_index = 0; + + // FIXME: use templates here instead of std::function + struct sync_task_body { + std::function body; + }; + struct async_task_body { + std::function&& callback, size_t total_order_index)> body; + }; + + util::variant task_body; + + // number of dependencies according to a dependency graph (i.e. number of "input" edges). + size_t dependencies = 0; + + // number of unsatisfied dependencies. When drops to zero task is ready for execution. + // Initially equal to "dependencies" + atomic_copyable_wrapper dependency_count = 0; + + std::vector dependants; + + tile_node(decltype(sync_task_body::body)&& f) : task_body(sync_task_body{std::move(f)}) {}; + tile_node(async_tag, decltype(async_task_body::body)&& f) : task_body(async_task_body{std::move(f)}) {}; +}; + +std::ostream& operator<<(std::ostream& o, tile_node const& n); + +struct tile_node_indirect_priority_comparator { + bool operator()(tile_node const * lhs, tile_node const * rhs) const { + return lhs->total_order_index > rhs->total_order_index; + } +}; + +using prio_items_queue_t = tbb::concurrent_priority_queue; + +void execute(prio_items_queue_t& q); +void execute(prio_items_queue_t& q, tbb::task_arena& arena); + +}}} // namespace cv::gimpl::parallel + +#endif // HAVE_TBB + +#endif // OPENCV_GAPI_TBB_EXECUTOR_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/src/executor/last_value.hpp b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/last_value.hpp new file mode 100644 index 00000000000..152449a879a --- /dev/null +++ 
b/inference-engine/thirdparty/fluid/modules/gapi/src/executor/last_value.hpp @@ -0,0 +1,105 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_EXECUTOR_LAST_VALUE_HPP +#define OPENCV_GAPI_EXECUTOR_LAST_VALUE_HPP + +#include +#include + +#include +#include + +namespace cv { +namespace gapi { +namespace own { + +// This class implements a "Last Written Value" thing. Writer threads +// (in our case, it is just one) can write as many values there as it +// can. +// +// The reader thread gets only a value it gets at the time (or blocks +// if there was no value written since the last read). +// +// Again, the implementation is highly inefficient right now. +template +class last_written_value { + cv::util::optional m_data; + + std::mutex m_mutex; + std::condition_variable m_cond_empty; + + void unsafe_pop(T &t); + +public: + last_written_value() {} + last_written_value(const last_written_value &cc) + : m_data(cc.m_data) { + // FIXME: what to do with all that locks, etc? + } + last_written_value(last_written_value &&cc) + : m_data(std::move(cc.m_data)) { + // FIXME: what to do with all that locks, etc? + } + + // FIXME: && versions + void push(const T &t); + void pop(T &t); + bool try_pop(T &t); + + // Not thread-safe + void clear(); +}; + +// Internal: do shared pop things assuming the lock is already there +template +void last_written_value::unsafe_pop(T &t) { + GAPI_Assert(m_data.has_value()); + t = std::move(m_data.value()); + m_data.reset(); +} + +// Push an element to the queue. Blocking if there's no space left +template +void last_written_value::push(const T& t) { + std::unique_lock lock(m_mutex); + m_data = cv::util::make_optional(t); + lock.unlock(); + m_cond_empty.notify_one(); +} + +// Pop an element from the queue. 
Blocking if there's no items +template +void last_written_value::pop(T &t) { + std::unique_lock lock(m_mutex); + if (!m_data.has_value()) { + // if there is no data, wait + m_cond_empty.wait(lock, [&](){return m_data.has_value();}); + } + unsafe_pop(t); +} + +// Try pop an element from the queue. Returns false if queue is empty +template +bool last_written_value::try_pop(T &t) { + std::unique_lock lock(m_mutex); + if (!m_data.has_value()) { + // if there is no data, return + return false; + } + unsafe_pop(t); + return true; +} + +// Clear the value holder. This method is not thread-safe. +template +void last_written_value::clear() { + m_data.reset(); +} + +}}} // namespace cv::gapi::own + +#endif // OPENCV_GAPI_EXECUTOR_CONC_QUEUE_HPP diff --git a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests.hpp b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests.hpp index 308f88622bb..48ac4482a79 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests.hpp @@ -97,6 +97,7 @@ GAPI_TEST_FIXTURE(MaxTest, initMatsRandU, <>, 0) GAPI_TEST_FIXTURE(AbsDiffTest, initMatsRandU, <>, 0) GAPI_TEST_FIXTURE(AbsDiffCTest, initMatsRandU, <>, 0) GAPI_TEST_FIXTURE(SumTest, initMatrixRandU, FIXTURE_API(CompareScalars), 1, cmpF) +GAPI_TEST_FIXTURE(CountNonZeroTest, initMatrixRandU, FIXTURE_API(CompareScalars), 1, cmpF) GAPI_TEST_FIXTURE(AddWeightedTest, initMatsRandU, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(NormTest, initMatrixRandU, FIXTURE_API(CompareScalars,NormTypes), 2, cmpF, opType) @@ -150,13 +151,23 @@ GAPI_TEST_FIXTURE(WarpPerspectiveTest, initMatrixRandU, GAPI_TEST_FIXTURE(WarpAffineTest, initMatrixRandU, FIXTURE_API(CompareMats, double , double, int, int, cv::Scalar), 6, cmpF, angle, scale, flags, border_mode, border_value) +GAPI_TEST_FIXTURE(KMeansNDNoInitTest, initMatrixRandU, FIXTURE_API(int, 
cv::KmeansFlags), + 2, K, flags) +GAPI_TEST_FIXTURE(KMeansNDInitTest, initMatrixRandU, + FIXTURE_API(CompareMats, int, cv::KmeansFlags), 3, cmpF, K, flags) +GAPI_TEST_FIXTURE(KMeans2DNoInitTest, initNothing, FIXTURE_API(int, cv::KmeansFlags), + 2, K, flags) +GAPI_TEST_FIXTURE(KMeans2DInitTest, initNothing, FIXTURE_API(int, cv::KmeansFlags), 2, K, flags) +GAPI_TEST_FIXTURE(KMeans3DNoInitTest, initNothing, FIXTURE_API(int, cv::KmeansFlags), + 2, K, flags) +GAPI_TEST_FIXTURE(KMeans3DInitTest, initNothing, FIXTURE_API(int, cv::KmeansFlags), 2, K, flags) GAPI_TEST_EXT_BASE_FIXTURE(ParseSSDBLTest, ParserSSDTest, initNothing, FIXTURE_API(float, int), 2, confidence_threshold, filter_label) GAPI_TEST_EXT_BASE_FIXTURE(ParseSSDTest, ParserSSDTest, initNothing, FIXTURE_API(float, bool, bool), 3, confidence_threshold, alignment_to_square, filter_out_of_bounds) GAPI_TEST_EXT_BASE_FIXTURE(ParseYoloTest, ParserYoloTest, initNothing, - FIXTURE_API(float, float, int), 3, confidence_threshold, nms_threshold, num_classes) + FIXTURE_API(float, float, int, std::pair), 4, confidence_threshold, nms_threshold, num_classes, dims_config) GAPI_TEST_FIXTURE(SizeTest, initMatrixRandU, <>, 0) GAPI_TEST_FIXTURE(SizeRTest, initNothing, <>, 0) } // opencv_test diff --git a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests_inl.hpp b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests_inl.hpp index 331ef70a3bd..2a34fbef66b 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests_inl.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_core_tests_inl.hpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018-2019 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #ifndef OPENCV_GAPI_CORE_TESTS_INL_HPP @@ -15,6 +15,16 @@ namespace opencv_test { +namespace +{ +template +inline bool compareVectorsAbsExact(const std::vector& outGAPI, + const std::vector& outOCV) +{ + return AbsExactVector().to_compare_f()(outGAPI, outOCV); +} +} + TEST_P(MathOpTest, MatricesAccuracyTest) { // G-API code & corresponding OpenCV code //////////////////////////////// @@ -614,6 +624,30 @@ TEST_P(SumTest, AccuracyTest) } } +#pragma push_macro("countNonZero") +#undef countNonZero +TEST_P(CountNonZeroTest, AccuracyTest) +{ + int out_cnz_gapi = -1; + int out_cnz_ocv = -2; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::countNonZero(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_cnz_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_cnz_ocv = cv::countNonZero(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_cnz_gapi, out_cnz_ocv)); + } +} +#pragma pop_macro("countNonZero") + TEST_P(AddWeightedTest, AccuracyTest) { auto& rng = cv::theRNG(); @@ -1353,6 +1387,187 @@ TEST_P(NormalizeTest, Test) } } +TEST_P(KMeansNDNoInitTest, AccuracyTest) +{ + const int amount = sz.height != 1 ? sz.height : sz.width, + dim = sz.height != 1 ? 
sz.width : (type >> CV_CN_SHIFT) + 1; + // amount of channels + const cv::TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0); + const int attempts = 1; + double compact_gapi = -1.; + cv::Mat labels_gapi, centers_gapi; + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + cv::GOpaque compactness; + cv::GMat outLabels, centers; + std::tie(compactness, outLabels, centers) = cv::gapi::kmeans(in, K, criteria, attempts, flags); + cv::GComputation c(cv::GIn(in), cv::GOut(compactness, outLabels, centers)); + c.apply(cv::gin(in_mat1), cv::gout(compact_gapi, labels_gapi, centers_gapi), getCompileArgs()); + // Validation ////////////////////////////////////////////////////////////// + { + EXPECT_GE(compact_gapi, 0.); + EXPECT_EQ(labels_gapi.cols, 1); + EXPECT_EQ(labels_gapi.rows, amount); + EXPECT_FALSE(labels_gapi.empty()); + EXPECT_EQ(centers_gapi.cols, dim); + EXPECT_EQ(centers_gapi.rows, K); + EXPECT_FALSE(centers_gapi.empty()); + } +} + +TEST_P(KMeansNDInitTest, AccuracyTest) +{ + const int amount = sz.height != 1 ? 
sz.height : sz.width; + const cv::TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0); + const int attempts = 1; + cv::Mat bestLabels(cv::Size{1, amount}, CV_32SC1); + double compact_ocv = -1., compact_gapi = -1.; + cv::Mat labels_ocv, labels_gapi, centers_ocv, centers_gapi; + cv::randu(bestLabels, 0, K); + bestLabels.copyTo(labels_ocv); + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in, inLabels; + cv::GOpaque compactness; + cv::GMat outLabels, centers; + std::tie(compactness, outLabels, centers) = + cv::gapi::kmeans(in, K, inLabels, criteria, attempts, flags); + cv::GComputation c(cv::GIn(in, inLabels), cv::GOut(compactness, outLabels, centers)); + c.apply(cv::gin(in_mat1, bestLabels), cv::gout(compact_gapi, labels_gapi, centers_gapi), + getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + compact_ocv = cv::kmeans(in_mat1, K, labels_ocv, criteria, attempts, flags, centers_ocv); + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(compact_gapi == compact_ocv); + EXPECT_TRUE(cmpF(labels_gapi, labels_ocv)); + EXPECT_TRUE(cmpF(centers_gapi, centers_ocv)); + } +} + +TEST_P(KMeans2DNoInitTest, AccuracyTest) +{ + const int amount = sz.height; + const cv::TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0); + const int attempts = 1; + std::vector in_vector{}; + double compact_gapi = -1.; + std::vector labels_gapi{}; + std::vector centers_gapi{}; + initPointsVectorRandU(amount, in_vector); + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + cv::GArray inLabels(std::vector{}); + cv::GOpaque compactness; + cv::GArray outLabels; + cv::GArray centers; + std::tie(compactness, outLabels, centers) = + cv::gapi::kmeans(in, K, inLabels, criteria, attempts, flags); + cv::GComputation c(cv::GIn(in), cv::GOut(compactness, outLabels, centers)); + 
c.apply(cv::gin(in_vector), cv::gout(compact_gapi, labels_gapi, centers_gapi), getCompileArgs()); + // Validation ////////////////////////////////////////////////////////////// + { + EXPECT_GE(compact_gapi, 0.); + EXPECT_EQ(labels_gapi.size(), static_cast(amount)); + EXPECT_EQ(centers_gapi.size(), static_cast(K)); + } +} + +TEST_P(KMeans2DInitTest, AccuracyTest) +{ + const int amount = sz.height; + const cv::TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0); + const int attempts = 1; + std::vector in_vector{}; + std::vector bestLabels(amount); + double compact_ocv = -1., compact_gapi = -1.; + std::vector labels_ocv{}, labels_gapi{}; + std::vector centers_ocv{}, centers_gapi{}; + initPointsVectorRandU(amount, in_vector); + cv::randu(bestLabels, 0, K); + labels_ocv = bestLabels; + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + cv::GArray inLabels; + cv::GOpaque compactness; + cv::GArray outLabels; + cv::GArray centers; + std::tie(compactness, outLabels, centers) = + cv::gapi::kmeans(in, K, inLabels, criteria, attempts, flags); + cv::GComputation c(cv::GIn(in, inLabels), cv::GOut(compactness, outLabels, centers)); + c.apply(cv::gin(in_vector, bestLabels), cv::gout(compact_gapi, labels_gapi, centers_gapi), + getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + compact_ocv = cv::kmeans(in_vector, K, labels_ocv, criteria, attempts, flags, centers_ocv); + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(compact_gapi == compact_ocv); + EXPECT_TRUE(compareVectorsAbsExact(labels_gapi, labels_ocv)); + EXPECT_TRUE(compareVectorsAbsExact(centers_gapi, centers_ocv)); + } +} + +TEST_P(KMeans3DNoInitTest, AccuracyTest) +{ + const int amount = sz.height; + const cv::TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0); + const int attempts = 1; + std::vector in_vector{}; + double compact_gapi = -1.; 
+ std::vector labels_gapi{}; + std::vector centers_gapi{}; + initPointsVectorRandU(amount, in_vector); + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + cv::GArray inLabels(std::vector{}); + cv::GOpaque compactness; + cv::GArray outLabels; + cv::GArray centers; + std::tie(compactness, outLabels, centers) = + cv::gapi::kmeans(in, K, inLabels, criteria, attempts, flags); + cv::GComputation c(cv::GIn(in), cv::GOut(compactness, outLabels, centers)); + c.apply(cv::gin(in_vector), cv::gout(compact_gapi, labels_gapi, centers_gapi), getCompileArgs()); + // Validation ////////////////////////////////////////////////////////////// + { + EXPECT_GE(compact_gapi, 0.); + EXPECT_EQ(labels_gapi.size(), static_cast(amount)); + EXPECT_EQ(centers_gapi.size(), static_cast(K)); + } +} + +TEST_P(KMeans3DInitTest, AccuracyTest) +{ + const int amount = sz.height; + const cv::TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0); + const int attempts = 1; + std::vector in_vector{}; + std::vector bestLabels(amount); + double compact_ocv = -1., compact_gapi = -1.; + std::vector labels_ocv{}, labels_gapi{}; + std::vector centers_ocv{}, centers_gapi{}; + initPointsVectorRandU(amount, in_vector); + cv::randu(bestLabels, 0, K); + labels_ocv = bestLabels; + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + cv::GArray inLabels; + cv::GOpaque compactness; + cv::GArray outLabels; + cv::GArray centers; + std::tie(compactness, outLabels, centers) = + cv::gapi::kmeans(in, K, inLabels, criteria, attempts, flags); + cv::GComputation c(cv::GIn(in, inLabels), cv::GOut(compactness, outLabels, centers)); + c.apply(cv::gin(in_vector, bestLabels), cv::gout(compact_gapi, labels_gapi, centers_gapi), + getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + compact_ocv = cv::kmeans(in_vector, K, labels_ocv, criteria, attempts, flags, centers_ocv); + // 
Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(compact_gapi == compact_ocv); + EXPECT_TRUE(compareVectorsAbsExact(labels_gapi, labels_ocv)); + EXPECT_TRUE(compareVectorsAbsExact(centers_gapi, centers_ocv)); + } +} + // PLEASE DO NOT PUT NEW ACCURACY TESTS BELOW THIS POINT! ////////////////////// TEST_P(BackendOutputAllocationTest, EmptyOutput) @@ -1642,7 +1857,7 @@ TEST_P(ParseSSDTest, ParseTest) TEST_P(ParseYoloTest, ParseTest) { - cv::Mat in_mat = generateYoloOutput(num_classes); + cv::Mat in_mat = generateYoloOutput(num_classes, dims_config); auto anchors = cv::gapi::nn::parsers::GParseYolo::defaultAnchors(); std::vector boxes_gapi, boxes_ref; std::vector labels_gapi, labels_ref; @@ -1667,7 +1882,7 @@ TEST_P(SizeTest, ParseTest) cv::GMat in; cv::Size out_sz; - auto out = cv::gapi::size(in); + auto out = cv::gapi::streaming::size(in); cv::GComputation c(cv::GIn(in), cv::GOut(out)); c.apply(cv::gin(in_mat1), cv::gout(out_sz), getCompileArgs()); @@ -1680,7 +1895,7 @@ TEST_P(SizeRTest, ParseTest) cv::Size out_sz; cv::GOpaque op_rect; - auto out = cv::gapi::size(op_rect); + auto out = cv::gapi::streaming::size(op_rect); cv::GComputation c(cv::GIn(op_rect), cv::GOut(out)); c.apply(cv::gin(rect), cv::gout(out_sz), getCompileArgs()); diff --git a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests.hpp b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests.hpp index cd074efda09..2d929f12ec6 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests.hpp @@ -46,6 +46,8 @@ GAPI_TEST_FIXTURE(Erode3x3Test, initMatrixRandN, FIXTURE_API(CompareMats,int), 2 GAPI_TEST_FIXTURE(DilateTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int), 3, cmpF, kernSize, kernType) GAPI_TEST_FIXTURE(Dilate3x3Test, initMatrixRandN, FIXTURE_API(CompareMats,int), 2, cmpF, numIters) 
+GAPI_TEST_FIXTURE(MorphologyExTest, initMatrixRandN, FIXTURE_API(CompareMats,MorphTypes), + 2, cmpF, op) GAPI_TEST_FIXTURE(SobelTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int,int), 4, cmpF, kernSize, dx, dy) GAPI_TEST_FIXTURE(SobelXYTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int,int,int), 5, @@ -64,9 +66,45 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(GoodFeaturesTest, double,int,bool), 8, cmpF, fileName, type, maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursNoOffsetTest, + FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes, + cv::ContourApproximationModes), + 4, sz, type, mode, method) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursOffsetTest, <>, 0) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHNoOffsetTest, + FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes, + cv::ContourApproximationModes), + 4, sz, type, mode, method) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHOffsetTest, <>, 0) +GAPI_TEST_FIXTURE(BoundingRectMatTest, initMatrixRandU, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectMatVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectMatVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(FitLine2DMatVectorTest, initMatByPointsVectorRandU, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine2DVector32STest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine2DVector32FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine2DVector64FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DMatVectorTest, 
initMatByPointsVectorRandU, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DVector32STest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DVector32FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DVector64FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2YUVTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(BGR2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(RGB2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(I4202BGRTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(I4202RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(YUV2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(YUV2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(NV12toRGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) diff --git a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp index 4aadc17d5dc..2a4f2e64ea5 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp @@ -50,6 +50,27 @@ namespace rgb2yuyv(in_line_p, out_line_p, in.cols); } } + + // Draw random ellipses on given mat of given size and type + void initMatForFindingContours(cv::Mat& mat, const cv::Size& sz, 
const int type) + { + cv::RNG& rng = theRNG(); + mat = cv::Mat(sz, type, cv::Scalar::all(0)); + size_t numEllipses = rng.uniform(1, 10); + + for( size_t i = 0; i < numEllipses; i++ ) + { + cv::Point center; + cv::Size axes; + center.x = rng.uniform(0, sz.width); + center.y = rng.uniform(0, sz.height); + axes.width = rng.uniform(2, sz.width); + axes.height = rng.uniform(2, sz.height); + int color = rng.uniform(1, 256); + double angle = rng.uniform(0., 180.); + cv::ellipse(mat, center, axes, angle, 0., 360., color, 1, FILLED); + } + } } TEST_P(Filter2DTest, AccuracyTest) @@ -290,6 +311,29 @@ TEST_P(Dilate3x3Test, AccuracyTest) } } +TEST_P(MorphologyExTest, AccuracyTest) +{ + MorphShapes defShape = cv::MORPH_RECT; + int defKernSize = 3; + cv::Mat kernel = cv::getStructuringElement(defShape, cv::Size(defKernSize, defKernSize)); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::morphologyEx(in, op, kernel); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::morphologyEx(in_mat1, out_mat_ocv, op, kernel); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), sz); + } +} + TEST_P(SobelTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// @@ -447,6 +491,472 @@ TEST_P(GoodFeaturesTest, AccuracyTest) } } +TEST_P(FindContoursNoOffsetTest, AccuracyTest) +{ + std::vector> outCtsOCV, outCtsGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, mode, method); + } + + // G-API code 
////////////////////////////////////////////////////////////// + cv::GMat in; + cv::GArray> outCts; + outCts = cv::gapi::findContours(in, mode, method); + cv::GComputation c(GIn(in), GOut(outCts)); + c.apply(gin(in_mat1), gout(outCtsGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); +} + +TEST_P(FindContoursOffsetTest, AccuracyTest) +{ + const cv::Size sz(1280, 720); + const MatType2 type = CV_8UC1; + const cv::RetrievalModes mode = cv::RETR_EXTERNAL; + const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE; + const cv::Point offset(15, 15); + std::vector> outCtsOCV, outCtsGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, mode, method, offset); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + GOpaque gOffset; + cv::GArray> outCts; + outCts = cv::gapi::findContours(in, mode, method, gOffset); + cv::GComputation c(GIn(in, gOffset), GOut(outCts)); + c.apply(gin(in_mat1, offset), gout(outCtsGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); +} + +TEST_P(FindContoursHNoOffsetTest, AccuracyTest) +{ + std::vector> outCtsOCV, outCtsGAPI; + std::vector outHierOCV, outHierGAPI; + + 
initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + cv::GArray> outCts; + cv::GArray outHier; + std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method); + cv::GComputation c(GIn(in), GOut(outCts, outHier)); + c.apply(gin(in_mat1), gout(outCtsGAPI, outHierGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); + + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + EXPECT_TRUE(AbsExactVector().to_compare_f()(outHierOCV, outHierGAPI)); +} + +TEST_P(FindContoursHOffsetTest, AccuracyTest) +{ + const cv::Size sz(1280, 720); + const MatType2 type = CV_8UC1; + const cv::RetrievalModes mode = cv::RETR_EXTERNAL; + const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE; + const cv::Point offset(15, 15); + std::vector> outCtsOCV, outCtsGAPI; + std::vector outHierOCV, outHierGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method, offset); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + GOpaque gOffset; + cv::GArray> outCts; + cv::GArray outHier; + std::tie(outCts, outHier) = 
cv::gapi::findContoursH(in, mode, method, gOffset); + cv::GComputation c(GIn(in, gOffset), GOut(outCts, outHier)); + c.apply(gin(in_mat1, offset), gout(outCtsGAPI, outHierGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); + + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + EXPECT_TRUE(AbsExactVector().to_compare_f()(outHierOCV, outHierGAPI)); +} + +TEST_P(BoundingRectMatTest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectMatVector32STest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorS(sz.width); + cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255)); + in_mat1 = cv::Mat(in_vectorS); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + 
EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectMatVector32FTest, AccuracyTest) +{ + cv::RNG& rng = theRNG(); + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorF(sz.width); + const int fscale = 256; // avoid bits near ULP, generate stable test input + for (int i = 0; i < sz.width; i++) + { + cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + in_vectorF.push_back(pt); + } + in_mat1 = cv::Mat(in_vectorF); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + + +TEST_P(BoundingRectVector32STest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorS(sz.width); + cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255)); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vectorS), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_vectorS); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectVector32FTest, AccuracyTest) +{ + cv::RNG& rng = theRNG(); + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorF(sz.width); + const int fscale = 256; // avoid bits 
near ULP, generate stable test input + for (int i = 0; i < sz.width; i++) + { + cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + in_vectorF.push_back(pt); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vectorF), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_vectorF); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(FitLine2DMatVectorTest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_mat1, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine2DVector32STest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), 
cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine2DVector32FTest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine2DVector64FTest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison 
////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DMatVectorTest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_mat1, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DVector32STest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DVector32FTest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code 
////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DVector64FTest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(BGR2RGBTest, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::BGR2RGB(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2RGB); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + 
EXPECT_EQ(out_mat_gapi.size(), sz); + } +} + TEST_P(RGB2GrayTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// @@ -523,6 +1033,82 @@ TEST_P(YUV2RGBTest, AccuracyTest) } } +TEST_P(BGR2I420Test, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::BGR2I420(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2YUV_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2)); + } +} + +TEST_P(RGB2I420Test, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::RGB2I420(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_RGB2YUV_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2)); + } +} + +TEST_P(I4202BGRTest, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::I4202BGR(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2BGR_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + 
EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3)); + } +} + +TEST_P(I4202RGBTest, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::I4202RGB(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2RGB_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3)); + } +} + TEST_P(NV12toRGBTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// diff --git a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_parsers_tests_common.hpp b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_parsers_tests_common.hpp index 127a1c5a5e4..328f86b8517 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_parsers_tests_common.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_parsers_tests_common.hpp @@ -176,7 +176,7 @@ private: int randInRange(const int start, const int end) { GAPI_Assert(start <= end); - return start + std::rand() % (end - start + 1); + return theRNG().uniform(start, end); } cv::Rect generateBox(const cv::Size& in_sz) @@ -211,7 +211,7 @@ private: SSDitem it; it.image_id = static_cast(i); it.label = static_cast(randInRange(0, 9)); - it.confidence = static_cast(std::rand()) / RAND_MAX; + it.confidence = theRNG().uniform(0.f, 1.f); auto box = generateBox(in_sz); it.rc_left = normalize(box.x, in_sz.width); it.rc_right = normalize(box.x + box.width, in_sz.width); @@ -225,16 +225,30 @@ private: class ParserYoloTest { public: - cv::Mat generateYoloOutput(const int num_classes) + cv::Mat generateYoloOutput(const int num_classes, std::pair 
dims_config = {false, 4}) { - std::vector dims = { 1, 13, 13, (num_classes + 5) * 5 }; + bool one_dim = false; + int num_dims = 0; + std::tie(one_dim, num_dims) = dims_config; + GAPI_Assert(num_dims <= 4); + GAPI_Assert((!one_dim && num_dims >= 3) || + ( one_dim && num_dims >= 1)); + std::vector dims(num_dims, 1); + if (one_dim) { + dims.back() = (num_classes+5)*5*13*13; + } else { + dims.back() = (num_classes+5)*5; + dims[num_dims-2] = 13; + dims[num_dims-3] = 13; + } cv::Mat mat(dims, CV_32FC1); auto data = mat.ptr(); - const size_t range = dims[0] * dims[1] * dims[2] * dims[3]; + const size_t range = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); + cv::RNG& rng = theRNG(); for (size_t i = 0; i < range; ++i) { - data[i] = static_cast(std::rand()) / RAND_MAX; + data[i] = rng.uniform(0.f, 1.f); } return mat; } diff --git a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_tests_common.hpp b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_tests_common.hpp index 113f3c73c0c..6d118813720 100644 --- a/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_tests_common.hpp +++ b/inference-engine/thirdparty/fluid/modules/gapi/test/common/gapi_tests_common.hpp @@ -74,6 +74,50 @@ namespace } #endif // WINRT } + + template inline void initPointRandU(cv::RNG &rng, cv::Point_& pt) + { + GAPI_Assert(std::is_integral::value); + pt = cv::Point_(static_cast(static_cast(rng(CHAR_MAX + 1U))), + static_cast(static_cast(rng(CHAR_MAX + 1U)))); + } + + template inline void initPointRandU(cv::RNG &rng, cv::Point3_& pt) + { + GAPI_Assert(std::is_integral::value); + pt = cv::Point3_(static_cast(static_cast(rng(CHAR_MAX + 1U))), + static_cast(static_cast(rng(CHAR_MAX + 1U))), + static_cast(static_cast(rng(CHAR_MAX + 1U)))); + } + + template inline void initFloatPointRandU(cv::RNG &rng, cv::Point_ &pt) + { + GAPI_Assert(std::is_floating_point::value); + static const int fscale = 256; // avoid bits near ULP, generate stable test input 
+ pt = cv::Point_(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point2f &pt) + { initFloatPointRandU(rng, pt); } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point2d &pt) + { initFloatPointRandU(rng, pt); } + + template inline void initFloatPointRandU(cv::RNG &rng, cv::Point3_ &pt) + { + GAPI_Assert(std::is_floating_point::value); + static const int fscale = 256; // avoid bits near ULP, generate stable test input + pt = cv::Point3_(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point3f &pt) + { initFloatPointRandU(rng, pt); } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point3d &pt) + { initFloatPointRandU(rng, pt); } } // namespace namespace opencv_test @@ -279,6 +323,80 @@ public: } } + template + inline void initPointRandU(cv::RNG& rng, T& pt) + { ::initPointRandU(rng, pt); } + +// Disable unreachable code warning for MSVS 2015 +#if defined _MSC_VER && _MSC_VER < 1910 /*MSVS 2017*/ +#pragma warning(push) +#pragma warning(disable: 4702) +#endif + // initialize std::vector>/std::vector> + template class Pt> + void initPointsVectorRandU(const int sz_in, std::vector> &vec_) + { + cv::RNG& rng = theRNG(); + + vec_.clear(); + vec_.reserve(sz_in); + + for (int i = 0; i < sz_in; i++) + { + Pt pt; + initPointRandU(rng, pt); + vec_.emplace_back(pt); + } + } +#if defined _MSC_VER && _MSC_VER < 1910 /*MSVS 2017*/ +#pragma warning(pop) +#endif + + template + inline void initMatByPointsVectorRandU(const cv::Size &sz_in) + { + std::vector in_vector; + initPointsVectorRandU(sz_in.width, in_vector); + in_mat1 = cv::Mat(in_vector, true); + } + + // initialize Mat by a vector of Points + template