Merge branch 'master' into github_actions/tests_parallel
commit aba622cf31
@@ -2,6 +2,12 @@
@sphinxdirective

.. meta::
   :description: The list of types of devices and corresponding plugins which
                 are compatible with OpenVINO Runtime and support inference
                 of deep learning models.


.. toctree::
   :maxdepth: 1
   :hidden:

@@ -9,13 +15,9 @@
   openvino_docs_OV_UG_query_api
   openvino_docs_OV_UG_supported_plugins_CPU
   openvino_docs_OV_UG_supported_plugins_GPU
   openvino_docs_OV_UG_supported_plugins_NPU
   openvino_docs_OV_UG_supported_plugins_GNA

.. meta::
   :description: The list of types of devices and corresponding plugins which
                 are compatible with OpenVINO Runtime and support inference
                 of deep learning models.

OpenVINO™ Runtime can infer deep learning models using the following device types:

docs/OV_Runtime_UG/supported_plugins/NPU.md (new file, 28 lines)
@@ -0,0 +1,28 @@
# NPU Device {#openvino_docs_OV_UG_supported_plugins_NPU}

@sphinxdirective

.. meta::
   :description: The NPU plugin in the Intel® Distribution of OpenVINO™ toolkit
                 aims at high performance inference of neural
                 networks on the low-power NPU processing device.


NPU is a new generation of low-power processing unit dedicated to processing neural networks.
The NPU plugin is a core part of the OpenVINO™ toolkit. For its in-depth description, see:

..
   - `NPU plugin developer documentation < cmake_options_for_custom_compilation.md ??? >`__.
   - `NPU plugin source files < ??? >`__.


@endsphinxdirective

docs/install_guides/configurations-for-intel-npu.md (new file, 71 lines)
@@ -0,0 +1,71 @@
# Configurations for Intel® NPU with OpenVINO™ {#openvino_docs_install_guides_configurations_for_intel_npu}

@sphinxdirective

.. meta::
   :description: Learn how to provide additional configuration for Intel®
                 NPU to work with the OpenVINO™ toolkit on your system.


Drivers and Dependencies
########################

The Intel® NPU device requires a proper driver to be installed on the system.


Linux
####################

Prerequisites
++++++++++++++++++++

Ensure that make, gcc, and the Linux kernel headers are installed. Use the following command to install the required software:

.. code-block:: sh

   sudo apt-get install gcc make linux-headers-generic


Configuration steps
++++++++++++++++++++


Windows
####################

The Intel® NPU driver for Windows is available through Windows Update.


What’s Next?
####################

Now you are ready to try out OpenVINO™. You can use the following tutorials to write your applications using Python and C++.

* Developing in Python:

  * `Start with TensorFlow models with OpenVINO™ <notebooks/101-tensorflow-to-openvino-with-output.html>`__
  * `Start with ONNX and PyTorch models with OpenVINO™ <notebooks/102-pytorch-onnx-to-openvino-with-output.html>`__
  * `Start with PaddlePaddle models with OpenVINO™ <notebooks/103-paddle-to-openvino-classification-with-output.html>`__

* Developing in C++:

  * :doc:`Image Classification Async C++ Sample <openvino_inference_engine_samples_classification_sample_async_README>`
  * :doc:`Hello Classification C++ Sample <openvino_inference_engine_samples_hello_classification_README>`
  * :doc:`Hello Reshape SSD C++ Sample <openvino_inference_engine_samples_hello_reshape_ssd_README>`

@endsphinxdirective
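
As a quick sanity check after the driver is set up, you can list the devices OpenVINO detects; an ``NPU`` entry confirms that the plugin and the driver are working together. The snippet below is a minimal sketch, assuming the ``openvino`` Python package is installed:

.. code-block:: python

   # Minimal sketch: confirm that the NPU device is visible to OpenVINO
   # after installing the driver. Assumes the `openvino` package is installed.
   from openvino.runtime import Core

   core = Core()
   print(core.available_devices)  # expected to include 'NPU' once the driver is in place

If ``NPU`` does not appear in the list, re-checking the driver installation and device permissions is usually the first step.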

@@ -13,6 +13,7 @@
   :hidden:

   For GPU <openvino_docs_install_guides_configurations_for_intel_gpu>
   For NPU <openvino_docs_install_guides_configurations_for_intel_npu>
   For GNA <openvino_docs_install_guides_configurations_for_intel_gna>

docs/notebooks/250-music-generation-with-output.rst (new file, 574 lines)
File diff suppressed because one or more lines are too long

docs/notebooks/251-tiny-sd-image-generation-with-output.rst (new file, 1136 lines)
File diff suppressed because it is too large

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d4313551a427c44d4bba572717be2134c99e0e8785a8f11d5fd2993ccfb10da
size 470354

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e97e1e206d64979d8fc9aaa6e46786172062c9a9eb1817a351af2a38557a012f
size 835162

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e4fb5a7c284be4de90e648badd409935c29b561b30f728ff905f522dd5a99f9
size 699062

@@ -30,6 +30,7 @@
   229-distilbert-sequence-classification
   243-tflite-selfie-segmentation
   247-code-language-id
   250-music-generation
   401-object-detection
   402-pose-estimation
   403-action-recognition-webcam

@@ -21,6 +21,8 @@
   232-clip-language-saliency-map
   243-tflite-selfie-segmentation
   244-named-entity-recognition
   250-music-generation
   251-tiny-sd-image-generation
   305-tensorflow-quantization-aware-training
   401-object-detection
   404-style-transfer

@@ -9,12 +9,17 @@

The OpenVINO runtime can infer various models of different input and output formats. Here, you can find configurations
supported by OpenVINO devices, which are CPU, GPU, and GNA (Gaussian Neural Accelerator coprocessor).
supported by OpenVINO devices, which are CPU, GPU, NPU, and GNA (Gaussian Neural Accelerator coprocessor).
Currently, processors of the 11th generation and later (up to the 13th generation at the moment) provide a further performance boost, especially with INT8 models.

.. note::

   With OpenVINO™ 2023.0 release, support has been cancelled for all VPU accelerators based on Intel® Movidius™.
   With OpenVINO™ 2023.0 release, support has been cancelled for:

   - Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X
   - Intel® Vision Accelerator Design with Intel® Movidius™

   To keep using the MYRIAD and HDDL plugins with your hardware, revert to the OpenVINO 2022.3 LTS release.

+---------------------------------------------------------------------+------------------------------------------------------------------------------------------------------+
@@ -31,7 +36,7 @@ Currently, processors of the 11th generation and later (up to the 13th generatio
|| :doc:`GPU <openvino_docs_OV_UG_supported_plugins_GPU>` | Intel® Processor Graphics including Intel® HD Graphics and Intel® Iris® Graphics, |
|| | Intel® Arc™ A-Series Graphics, Intel® Data Center GPU Flex Series, Intel® Data Center GPU Max Series |
+---------------------------------------------------------------------+------------------------------------------------------------------------------------------------------+
|| :doc:`GNA plugin <openvino_docs_OV_UG_supported_plugins_GNA>` | Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® |
|| :doc:`GNA <openvino_docs_OV_UG_supported_plugins_GNA>` | Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® |
|| (available in the Intel® Distribution of OpenVINO™ toolkit) | Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® |
|| | Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® |
|| | Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, |
@@ -41,7 +46,15 @@ Currently, processors of the 11th generation and later (up to the 13th generatio
|| | Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, |
|| | Intel® Core™ i3-1000G4 Processor |
+---------------------------------------------------------------------+------------------------------------------------------------------------------------------------------+
|| :doc:`NPU <openvino_docs_OV_UG_supported_plugins_NPU>` | |
|| | |
|| | |
|| | |
|| | |
|| | |
|| | |
|| | |
+---------------------------------------------------------------------+------------------------------------------------------------------------------------------------------+
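
Any of the devices listed above is selected by passing its plugin name when compiling a model. The following is a minimal sketch for illustration only, assuming the ``openvino`` Python package is installed and with ``model.xml`` as a placeholder path:

.. code-block:: python

   # Minimal sketch: compile a model for one of the devices listed above.
   # "model.xml" is a placeholder path; the device string can be "CPU", "GPU",
   # "NPU", or "GNA", depending on the available hardware.
   from openvino.runtime import Core

   core = Core()
   model = core.read_model("model.xml")
   compiled_model = core.compile_model(model, device_name="CPU")
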
Besides inference using a specific device, OpenVINO offers three inference modes for automated inference management. These are:

@@ -257,6 +257,10 @@ Demos that demonstrate inference on a particular model.
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `249-oneformer-segmentation <notebooks/249-oneformer-segmentation-with-output.html>`__ | Universal segmentation with OneFormer and OpenVINO™. | |n249-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `250-music-generation <notebooks/250-music-generation-with-output.html>`__ |br| |n250| |br| |c250| | Controllable Music Generation with MusicGen and OpenVINO™. | |n250-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `251-tiny-sd-image-generation <notebooks/251-tiny-sd-image-generation-with-output.html>`__ |br| |c251| | Image Generation with Tiny-SD and OpenVINO™. | |n251-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
Model Training

@@ -318,7 +322,7 @@ The following tutorials are guaranteed to provide a great experience with infere
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `Whisper - Subtitles generation <notebooks/227-whisper-subtitles-generation-with-output.html>`__ |br| |c227| | Generate subtitles for video with OpenAI Whisper and OpenVINO. | |n227-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `CLIP - zero-shot-image-classification <notebooks/228-clip-zero-shot-image-classification-with-output.html>`__ | Perform Zero-shot image classification with CLIP and OpenVINO. | |n228-img1| |
| `CLIP - zero-shot-image-classification <notebooks/228-clip-zero-shot-convert-with-output.html>`__ | Perform Zero-shot image classification with CLIP and OpenVINO. | |n228-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `BLIP - Visual-language-processing <notebooks/233-blip-visual-language-processing-with-output.html>`__ | Visual question answering and image captioning using BLIP and OpenVINO™. | |n233-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+

@@ -326,11 +330,15 @@ The following tutorials are guaranteed to provide a great experience with infere
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `DeepFloyd IF - Text-to-Image generation <notebooks/238-deep-floyd-if-with-output.html>`__ | Text-to-image generation with DeepFloyd IF and OpenVINO™. | |n238-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `ImageBind <notebooks/239-image-bind-with-output.html>`__ | Binding multimodal data, using ImageBind and OpenVINO™. | |n239-img1| |
| `ImageBind <notebooks/239-image-bind-convert-with-output.html>`__ | Binding multimodal data, using ImageBind and OpenVINO™. | |n239-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `Dolly v2 <notebooks/240-dolly-2-instruction-following-with-output.html>`__ | Instruction following using Databricks Dolly 2.0 and OpenVINO™. | |n240-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `248-stable-diffusion-xl <notebooks/248-stable-diffusion-xl-with-output.html>`__ | Image generation with Stable Diffusion XL and OpenVINO™. | |n248-img1| |
| `Stable Diffusion XL <notebooks/248-stable-diffusion-xl-with-output.html>`__ | Image generation with Stable Diffusion XL and OpenVINO™. | |n248-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `MusicGen <notebooks/250-music-generation-with-output.html>`__ |br| |n250| |br| |c250| | Controllable Music Generation with MusicGen and OpenVINO™. | |n250-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| `Tiny SD <notebooks/251-tiny-sd-image-generation-with-output.html>`__ |br| |c251| | Image Generation with Tiny-SD and OpenVINO™. | |n251-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+

@@ -487,6 +495,10 @@ Made with `contributors-img <https://contrib.rocks>`__.
:target: https://user-images.githubusercontent.com/29454499/258651862-28b63016-c5ff-4263-9da8-73ca31100165.jpeg
.. |n249-img1| image:: https://camo.githubusercontent.com/f46c3642d3266e9d56d8ea8a943e67825597de3ff51698703ea2ddcb1086e541/68747470733a2f2f6769746875622d70726f64756374696f6e2d757365722d61737365742d3632313064662e73332e616d617a6f6e6177732e636f6d2f37363136313235362f3235383634303731332d66383031626430392d653932372d346162642d616132662d3939393064653463616638642e676966
:target: https://camo.githubusercontent.com/f46c3642d3266e9d56d8ea8a943e67825597de3ff51698703ea2ddcb1086e541/68747470733a2f2f6769746875622d70726f64756374696f6e2d757365722d61737365742d3632313064662e73332e616d617a6f6e6177732e636f6d2f37363136313235362f3235383634303731332d66383031626430392d653932372d346162642d616132662d3939393064653463616638642e676966
.. |n250-img1| image:: https://user-images.githubusercontent.com/76463150/260439306-81c81c8d-1f9c-41d0-b881-9491766def8e.png
:target: https://user-images.githubusercontent.com/76463150/260439306-81c81c8d-1f9c-41d0-b881-9491766def8e.png
.. |n251-img1| image:: https://user-images.githubusercontent.com/29454499/260904650-274fc2f9-24d2-46a3-ac3d-d660ec3c9a19.png
:target: https://user-images.githubusercontent.com/29454499/260904650-274fc2f9-24d2-46a3-ac3d-d660ec3c9a19.png
.. |n301-img1| image:: https://user-images.githubusercontent.com/15709723/127779607-8fa34947-1c35-4260-8d04-981c41a2a2cc.png
:target: https://user-images.githubusercontent.com/15709723/127779607-8fa34947-1c35-4260-8d04-981c41a2a2cc.png
.. |n401-img1| image:: https://user-images.githubusercontent.com/4547501/141471665-82b28c86-cf64-4bfe-98b3-c314658f2d96.gif

@@ -647,6 +659,12 @@ Made with `contributors-img <https://contrib.rocks>`__.
:target: https://colab.research.google.com/github/openvinotoolkit/openvino_notebooks/blob/main/notebooks/244-named-entity-recognition/244-named-entity-recognition.ipynb
.. |n247| image:: https://mybinder.org/badge_logo.svg
:target: https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=notebooks%2F247-code-language-id%2F247-code-language-id.ipynb
.. |n250| image:: https://mybinder.org/badge_logo.svg
:target: https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=notebooks%2F250-music-generation%2F250-music-generation.ipynb
.. |c250| image:: https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667
:target: https://colab.research.google.com/github/openvinotoolkit/openvino_notebooks/blob/main/notebooks/250-music-generation/250-music-generation.ipynb
.. |c251| image:: https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667
:target: https://colab.research.google.com/github/openvinotoolkit/openvino_notebooks/blob/main/notebooks/251-tiny-sd-image-generation/251-tiny-sd-image-generation.ipynb
.. |c305| image:: https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667
:width: 109
:target: https://colab.research.google.com/github/openvinotoolkit/openvino_notebooks/blob/main/notebooks/305-tensorflow-quantization-aware-training/305-tensorflow-quantization-aware-training.ipynb

@@ -154,12 +154,13 @@ template <OutputVector (*T)(const NodeContext&), size_t in_idx = 0, size_t out_i
OutputVector quantizable_op(const NodeContext& context) {
    auto translation_res = T(context);
    FRONT_END_OP_CONVERSION_CHECK(translation_res.size() > out_idx, "Not enough outputs to apply quantization.");
    if (const auto quantized_pt_node = cast_quantized_fw_node(context.get_input(in_idx).get_node_shared_ptr())) {
        return {context.mark_node(std::make_shared<QuantizedPtNode>(quantized_pt_node->get_type(),
                                                                    translation_res[out_idx],
                                                                    quantized_pt_node->get_scale(),
                                                                    quantized_pt_node->get_zero_point(),
                                                                    quantized_pt_node->get_dtype()))};
    auto target_input = context.get_input(in_idx);
    if (const auto quantized_pt_node = cast_quantized_fw_node(target_input.get_node_shared_ptr())) {
        return {quantize(context,
                         translation_res[out_idx],
                         quantized_pt_node->get_scale(),
                         quantized_pt_node->get_zero_point(),
                         target_input)};
    }
    return translation_res;
}

@@ -1,11 +1,9 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import unittest
from argparse import Namespace

import pytest
import numpy as np
from generator import generator, generate

from openvino.tools.mo.back.ChangeRandomUniformOutputType import ChangeRandomUniformOutputType
from openvino.tools.mo.graph.graph import Node

@@ -31,15 +29,14 @@ edges_with_convert = [*connect('placeholder', '0:random_uniform'), *connect('min
                      *connect('convert', 'result'), ]


@generator
class ChangeRandomUniformOutputTypeTest(unittest.TestCase):
    @generate(*[
        ("FP16", np.float32, np.float16),
        ("FP32", np.float16, np.float32),
        ("FP32", np.float32, None),
        ("FP32", np.int64, None)
    ])
    def test_change_random_uniform_output_type(self, ir_type, out_type, dst_type):
class TestChangeRandomUniformOutputType():
    @pytest.mark.parametrize("ir_type, out_type, dst_type", [
        ("FP16", np.float32, np.float16),
        ("FP32", np.float16, np.float32),
        ("FP32", np.float32, None),
        ("FP32", np.int64, None)
    ])
    def test_change_random_uniform_output_type(self, ir_type, out_type, dst_type):
        graph = build_graph(nodes, edges, cli=Namespace(data_type=ir_type))
        graph_ref = build_graph(nodes, edges if dst_type is None else edges_with_convert, {},
                                nodes_with_edges_only=True)
@@ -48,8 +45,8 @@ class ChangeRandomUniformOutputTypeTest(unittest.TestCase):
        ChangeRandomUniformOutputType().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
        self.assertTrue(flag, resp)
        assert flag, resp

        if dst_type is not None:
            convert_node = Node(graph, 'random_uniform').out_port(0).get_destination().node
            self.assertTrue(convert_node['dst_type'] == dst_type)
            assert convert_node['dst_type'] == dst_type
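
# The change above replaces the generator-based unittest cases with
# pytest.mark.parametrize. A standalone sketch of the same migration pattern,
# using a hypothetical `add` function in place of the transformation under test:
import pytest


def add(a, b):
    return a + b


class TestAdd:
    @pytest.mark.parametrize("a, b, expected", [
        (1, 2, 3),
        (0, 0, 0),
        (-1, 1, 0),
    ])
    def test_add(self, a, b, expected):
        # Plain asserts replace unittest's self.assertTrue / self.assertEqual.
        assert add(a, b) == expected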