diff --git a/ngraph/frontend/frontend_manager/CMakeLists.txt b/ngraph/frontend/frontend_manager/CMakeLists.txt index 5efd560357b..d11be143911 100644 --- a/ngraph/frontend/frontend_manager/CMakeLists.txt +++ b/ngraph/frontend/frontend_manager/CMakeLists.txt @@ -33,3 +33,18 @@ target_include_directories(${TARGET_NAME} PUBLIC $/python_api/${PYTHON_VERSION}/) @@ -73,11 +74,25 @@ file(GLOB_RECURSE SOURCES src/pyngraph/*.cpp) pybind11_add_module(_${PROJECT_NAME} MODULE ${SOURCES}) target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src") -target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::ngraph) + +target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::ngraph ngraph::frontend_manager) + if (TARGET ngraph::onnx_importer) add_dependencies(_${PROJECT_NAME} ngraph::onnx_importer) endif() +if(NGRAPH_UNIT_TEST_ENABLE) + add_subdirectory(tests/mock/mock_py_ngraph_frontend) + add_dependencies(_${PROJECT_NAME} mock_py_ngraph_frontend) + set_target_properties(mock_py_ngraph_frontend PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} + ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} + COMPILE_PDB_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} + PDB_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN}) + + add_subdirectory(tests/mock/pyngraph_fe_mock_api) + add_dependencies(_${PROJECT_NAME} pybind_mock_frontend) +endif() + # perform copy if(OpenVINO_MAIN_SOURCE_DIR) add_custom_command(TARGET _${PROJECT_NAME} diff --git a/ngraph/python/setup.py b/ngraph/python/setup.py index dfbb03fad98..e90cc66d6d1 100644 --- a/ngraph/python/setup.py +++ b/ngraph/python/setup.py @@ -39,6 +39,7 @@ packages = [ "ngraph.impl.op", "ngraph.impl.op.util", "ngraph.impl.passes", + "ngraph.frontend", ] data_files = [] diff --git a/ngraph/python/src/ngraph/__init__.py b/ngraph/python/src/ngraph/__init__.py index 0b276049d33..f51c5cea130 100644 --- a/ngraph/python/src/ngraph/__init__.py +++ b/ngraph/python/src/ngraph/__init__.py @@ -11,11 +11,23 @@ try: except DistributionNotFound: __version__ = "0.0.0.dev0" -from ngraph.impl import Node + +from ngraph.impl import Dimension from ngraph.impl import Function +from ngraph.impl import Node +from ngraph.impl import PartialShape +from ngraph.frontend import FrontEnd +from ngraph.frontend import FrontEndCapabilities +from ngraph.frontend import FrontEndManager +from ngraph.frontend import GeneralFailure +from ngraph.frontend import NotImplementedFailure +from ngraph.frontend import InitializationFailure +from ngraph.frontend import InputModel +from ngraph.frontend import OpConversionFailure +from ngraph.frontend import OpValidationFailure +from ngraph.frontend import Place from ngraph.helpers import function_from_cnn from ngraph.helpers import function_to_cnn - from ngraph.opset7 import absolute from ngraph.opset7 import absolute as abs from ngraph.opset7 import acos diff --git a/ngraph/python/src/ngraph/frontend/__init__.py b/ngraph/python/src/ngraph/frontend/__init__.py new file mode 100644 index 00000000000..0ea21ad7c88 --- /dev/null +++ b/ngraph/python/src/ngraph/frontend/__init__.py @@ -0,0 +1,23 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Package: ngraph +Low level wrappers for the FrontEnd c++ api. 
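+
+A minimal usage sketch (illustrative only: the file path is a placeholder, "mock_py"
+is the test-only mock frontend added in this change, and the set of available
+frontend names depends on which frontend plugins are built):
+
+    from ngraph.frontend import FrontEndManager
+
+    fem = FrontEndManager()
+    print(fem.get_available_front_ends())
+    fe = fem.load_by_framework(framework="mock_py")
+    model = fe.load_from_file("model.bin")
+    function = fe.convert(model)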
+""" + +# flake8: noqa + +# main classes +from _pyngraph import FrontEndManager +from _pyngraph import FrontEnd +from _pyngraph import FrontEndCapabilities +from _pyngraph import InputModel +from _pyngraph import Place + +# exceptions +from _pyngraph import NotImplementedFailure +from _pyngraph import InitializationFailure +from _pyngraph import OpConversionFailure +from _pyngraph import OpValidationFailure +from _pyngraph import GeneralFailure diff --git a/ngraph/python/src/pyngraph/frontend/frontend.cpp b/ngraph/python/src/pyngraph/frontend/frontend.cpp new file mode 100644 index 00000000000..ecc736b37f0 --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/frontend.cpp @@ -0,0 +1,124 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "frontend_manager.hpp" +#include "frontend_manager/frontend_exceptions.hpp" +#include "frontend_manager/frontend_manager.hpp" +#include "pyngraph/function.hpp" + +namespace py = pybind11; + +void regclass_pyngraph_FrontEnd(py::module m) +{ + py::class_> fem( + m, "FrontEnd", py::dynamic_attr()); + fem.doc() = "ngraph.impl.FrontEnd wraps ngraph::frontend::FrontEnd"; + + fem.def("load_from_file", + &ngraph::frontend::FrontEnd::load_from_file, + py::arg("path"), + R"( + Loads an input model by specified model file path. + + Parameters + ---------- + path : str + Main model file path. + + Returns + ---------- + load_from_file : InputModel + Loaded input model. + )"); + + fem.def("convert", + static_cast (ngraph::frontend::FrontEnd::*)( + ngraph::frontend::InputModel::Ptr) const>(&ngraph::frontend::FrontEnd::convert), + py::arg("model"), + R"( + Completely convert and normalize entire function, throws if it is not possible. + + Parameters + ---------- + model : InputModel + Input model. + + Returns + ---------- + convert : Function + Fully converted nGraph function. + )"); + + fem.def("convert", + static_cast (ngraph::frontend::FrontEnd::*)( + std::shared_ptr) const>(&ngraph::frontend::FrontEnd::convert), + py::arg("function"), + R"( + Completely convert the remaining, not converted part of a function. + + Parameters + ---------- + function : Function + Partially converted nGraph function. + + Returns + ---------- + convert : Function + Fully converted nGraph function. + )"); + + fem.def("convert_partially", + &ngraph::frontend::FrontEnd::convert_partially, + py::arg("model"), + R"( + Convert only those parts of the model that can be converted leaving others as-is. + Converted parts are not normalized by additional transformations; normalize function or + another form of convert function should be called to finalize the conversion process. + + Parameters + ---------- + model : InputModel + Input model. + + Returns + ---------- + convert_partially : Function + Partially converted nGraph function. + )"); + + fem.def("decode", + &ngraph::frontend::FrontEnd::decode, + py::arg("model"), + R"( + Convert operations with one-to-one mapping with decoding nodes. + Each decoding node is an nGraph node representing a single FW operation node with + all attributes represented in FW-independent way. + + Parameters + ---------- + model : InputModel + Input model. + + Returns + ---------- + decode : Function + nGraph function after decoding. + )"); + + fem.def("normalize", + &ngraph::frontend::FrontEnd::normalize, + py::arg("function"), + R"( + Runs normalization passes on function that was loaded with partial conversion. 
+ + Parameters + ---------- + function : Function + Partially converted nGraph function. + )"); +} diff --git a/ngraph/python/src/pyngraph/frontend/frontend.hpp b/ngraph/python/src/pyngraph/frontend/frontend.hpp new file mode 100644 index 00000000000..de28e950bb5 --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/frontend.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_pyngraph_FrontEnd(py::module m); diff --git a/ngraph/python/src/pyngraph/frontend/frontend_manager.cpp b/ngraph/python/src/pyngraph/frontend/frontend_manager.cpp new file mode 100644 index 00000000000..a550f22e516 --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/frontend_manager.cpp @@ -0,0 +1,163 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "frontend_manager.hpp" +#include "frontend_manager/frontend_exceptions.hpp" +#include "frontend_manager/frontend_manager.hpp" + +namespace py = pybind11; + +void regclass_pyngraph_FrontEndManager(py::module m) +{ + py::class_> + fem(m, "FrontEndManager", py::dynamic_attr()); + fem.doc() = "ngraph.impl.FrontEndManager wraps ngraph::frontend::FrontEndManager"; + + fem.def(py::init<>()); + + fem.def("get_available_front_ends", + &ngraph::frontend::FrontEndManager::get_available_front_ends, + R"( + Gets list of registered frontends. + + Returns + ---------- + get_available_front_ends : List[str] + List of available frontend names. + )"); + fem.def("load_by_framework", + &ngraph::frontend::FrontEndManager::load_by_framework, + py::arg("framework"), + py::arg("capabilities") = ngraph::frontend::FrontEndCapabilities::FEC_DEFAULT, + R"( + Loads frontend by name of framework and capabilities. + + Parameters + ---------- + framework : str + Framework name. Throws exception if name is not in list of available frontends. + + capabilities : int + Frontend capabilities. Default is FrontEndCapabilities.FEC_DEFAULT. It is recommended to use only + those capabilities which are needed to minimize load time. + + Returns + ---------- + load_by_framework : FrontEnd + Frontend interface for further loading of models. 
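+
+ Examples
+ ----------
+ A sketch; "mock_py" is the test-only mock frontend, real names come from get_available_front_ends():
+ fem = FrontEndManager()
+ fe = fem.load_by_framework("mock_py", FrontEndCapabilities.CUT | FrontEndCapabilities.NAMES)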
+ )"); +} + +void regclass_pyngraph_FEC(py::module m) +{ + class FeCaps + { + public: + int get_caps() const { return m_caps; } + + private: + int m_caps; + }; + + py::class_> type(m, "FrontEndCapabilities"); + // type.doc() = "FrontEndCapabilities"; + type.attr("DEFAULT") = ngraph::frontend::FrontEndCapabilities::FEC_DEFAULT; + type.attr("CUT") = ngraph::frontend::FrontEndCapabilities::FEC_CUT; + type.attr("NAMES") = ngraph::frontend::FrontEndCapabilities::FEC_NAMES; + type.attr("WILDCARDS") = ngraph::frontend::FrontEndCapabilities::FEC_WILDCARDS; + + type.def( + "__eq__", + [](const FeCaps& a, const FeCaps& b) { return a.get_caps() == b.get_caps(); }, + py::is_operator()); +} + +void regclass_pyngraph_GeneralFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), "GeneralFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::GeneralFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_OpValidationFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "OpValidationFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::OpValidationFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_OpConversionFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "OpConversionFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::OpConversionFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_InitializationFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "InitializationFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::InitializationFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_NotImplementedFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "NotImplementedFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::NotImplementedFailure& e) + { + exc(e.what()); + } + }); +} diff --git a/ngraph/python/src/pyngraph/frontend/frontend_manager.hpp b/ngraph/python/src/pyngraph/frontend/frontend_manager.hpp new file mode 100644 index 00000000000..35caa7e5dd1 --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/frontend_manager.hpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_pyngraph_FrontEndManager(py::module m); +void regclass_pyngraph_FEC(py::module m); +void regclass_pyngraph_NotImplementedFailureFrontEnd(py::module m); +void regclass_pyngraph_InitializationFailureFrontEnd(py::module m); +void regclass_pyngraph_OpConversionFailureFrontEnd(py::module m); +void regclass_pyngraph_OpValidationFailureFrontEnd(py::module m); +void regclass_pyngraph_GeneralFailureFrontEnd(py::module m); + diff --git a/ngraph/python/src/pyngraph/frontend/inputmodel.cpp b/ngraph/python/src/pyngraph/frontend/inputmodel.cpp new file mode 100644 index 00000000000..f8775763806 --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/inputmodel.cpp @@ -0,0 +1,364 @@ +// Copyright (C) 2018-2021 Intel 
Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "frontend_manager/frontend_exceptions.hpp" +#include "frontend_manager/frontend_manager.hpp" + +namespace py = pybind11; + +void regclass_pyngraph_InputModel(py::module m) +{ + py::class_> im( + m, "InputModel", py::dynamic_attr()); + im.doc() = "ngraph.impl.InputModel wraps ngraph::frontend::InputModel"; + + im.def("get_place_by_tensor_name", + &ngraph::frontend::InputModel::get_place_by_tensor_name, + py::arg("tensorName"), + R"( + Returns a tensor place by a tensor name following framework conventions, or + nullptr if a tensor with this name doesn't exist. + + Parameters + ---------- + tensorName : str + Name of tensor. + + Returns + ---------- + get_place_by_tensor_name : Place + Tensor place corresponding to specified tensor name. + )"); + + im.def("get_place_by_operation_name", + &ngraph::frontend::InputModel::get_place_by_operation_name, + py::arg("operationName"), + R"( + Returns an operation place by an operation name following framework conventions, or + nullptr if an operation with this name doesn't exist. + + Parameters + ---------- + operationName : str + Name of operation. + + Returns + ---------- + get_place_by_operation_name : Place + Place representing operation. + )"); + + im.def("get_place_by_operation_name_and_input_port", + &ngraph::frontend::InputModel::get_place_by_operation_name_and_input_port, + py::arg("operationName"), + py::arg("inputPortIndex"), + R"( + Returns an input port place by operation name and appropriate port index. + + Parameters + ---------- + operationName : str + Name of operation. + + inputPortIndex : int + Index of input port for this operation. + + Returns + ---------- + get_place_by_operation_name_and_input_port : Place + Place representing input port of operation. + )"); + + im.def("get_place_by_operation_name_and_output_port", + &ngraph::frontend::InputModel::get_place_by_operation_name_and_output_port, + py::arg("operationName"), + py::arg("outputPortIndex"), + R"( + Returns an output port place by operation name and appropriate port index. + + Parameters + ---------- + operationName : str + Name of operation. + + outputPortIndex : int + Index of output port for this operation. + + Returns + ---------- + get_place_by_operation_name_and_output_port : Place + Place representing output port of operation. + )"); + + im.def("set_name_for_tensor", + &ngraph::frontend::InputModel::set_name_for_tensor, + py::arg("tensor"), + py::arg("newName"), + R"( + Sets name for tensor. Overwrites existing names of this place. + + Parameters + ---------- + tensor : Place + Tensor place. + + newName : str + New name for this tensor. + )"); + + im.def("add_name_for_tensor", + &ngraph::frontend::InputModel::add_name_for_tensor, + py::arg("tensor"), + py::arg("newName"), + R"( + Adds new name for tensor + + Parameters + ---------- + tensor : Place + Tensor place. + + newName : str + New name to be added to this place. + )"); + + im.def("set_name_for_operation", + &ngraph::frontend::InputModel::set_name_for_operation, + py::arg("operation"), + py::arg("newName"), + R"( + Adds new name for tensor. + + Parameters + ---------- + operation : Place + Operation place. + + newName : str + New name for this operation. + )"); + + im.def("free_name_for_tensor", + &ngraph::frontend::InputModel::free_name_for_tensor, + py::arg("name"), + R"( + Unassign specified name from tensor place(s). + + Parameters + ---------- + name : str + Name of tensor. 
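+
+ Examples
+ ----------
+ A renaming sketch (the tensor names are illustrative):
+ place = model.get_place_by_tensor_name(tensorName="old_name")
+ model.add_name_for_tensor(tensor=place, newName="new_name")
+ model.free_name_for_tensor(name="old_name")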
+ )"); + + im.def("free_name_for_operation", + &ngraph::frontend::InputModel::free_name_for_operation, + py::arg("name"), + R"( + Unassign specified name from operation place(s). + + Parameters + ---------- + name : str + Name of operation. + )"); + + im.def("set_name_for_dimension", + &ngraph::frontend::InputModel::set_name_for_dimension, + py::arg("place"), + py::arg("dimIndex"), + py::arg("dimName"), + R"( + Set name for a particular dimension of a place (e.g. batch dimension). + + Parameters + ---------- + place : Place + Model's place. + + shapeDimIndex : int + Dimension index. + + dimName : str + Name to assign on this dimension. + )"); + + im.def("cut_and_add_new_input", + &ngraph::frontend::InputModel::cut_and_add_new_input, + py::arg("place"), + py::arg("newName") = std::string(), + R"( + Cut immediately before this place and assign this place as new input; prune + all nodes that don't contribute to any output. + + Parameters + ---------- + place : Place + New place to be assigned as input. + + newNameOptional : str + Optional new name assigned to this input place. + )"); + + im.def("cut_and_add_new_output", + &ngraph::frontend::InputModel::cut_and_add_new_output, + py::arg("place"), + py::arg("newName") = std::string(), + R"( + Cut immediately before this place and assign this place as new output; prune + all nodes that don't contribute to any output. + + Parameters + ---------- + place : Place + New place to be assigned as output. + + newNameOptional : str + Optional new name assigned to this output place. + )"); + + im.def("add_output", + &ngraph::frontend::InputModel::add_output, + py::arg("place"), + R"( + Assign this place as new output or add necessary nodes to represent a new output. + + Parameters + ---------- + place : Place + Anchor point to add an output. + )"); + + im.def("remove_output", + &ngraph::frontend::InputModel::remove_output, + py::arg("place"), + R"( + Removes any sinks directly attached to this place with all inbound data flow + if it is not required by any other output. + + Parameters + ---------- + place : Place + Model place + )"); + + im.def("set_partial_shape", + &ngraph::frontend::InputModel::set_partial_shape, + py::arg("place"), + py::arg("shape"), + R"( + Defines all possible shape that may be used for this place; place should be + uniquely refer to some data. This partial shape will be converted to corresponding + shape of results ngraph nodes and will define shape inference when the model is + converted to ngraph. + + Parameters + ---------- + place : Place + Model place. + + shape : PartialShape + Partial shape for this place. + )"); + + im.def("get_partial_shape", + &ngraph::frontend::InputModel::get_partial_shape, + py::arg("place"), + R"( + Returns current partial shape used for this place. + + Parameters + ---------- + place : Place + Model place + + Returns + ---------- + get_partial_shape : PartialShape + Partial shape for this place. + )"); + + im.def("get_inputs", + &ngraph::frontend::InputModel::get_inputs, + R"( + Returns all inputs for a model. + + Returns + ---------- + get_inputs : List[Place] + A list of input places. + )"); + + im.def("get_outputs", + &ngraph::frontend::InputModel::get_outputs, + R"( + Returns all outputs for a model. An output is a terminal place in a graph where data escapes the flow. 
+ + Returns + ---------- + get_outputs : List[Place] + A list of output places + )"); + + im.def("extract_subgraph", + &ngraph::frontend::InputModel::extract_subgraph, + py::arg("inputs"), + py::arg("outputs"), + R"( + Leaves only subgraph that are defined by new inputs and new outputs. + + Parameters + ---------- + inputs : List[Place] + Array of new input places. + + outputs : List[Place] + Array of new output places. + )"); + + im.def("override_all_inputs", + &ngraph::frontend::InputModel::override_all_inputs, + py::arg("inputs"), + R"( + Modifies the graph to use new inputs instead of existing ones. New inputs + should completely satisfy all existing outputs. + + Parameters + ---------- + inputs : List[Place] + Array of new input places. + )"); + + im.def("override_all_outputs", + &ngraph::frontend::InputModel::override_all_outputs, + py::arg("outputs"), + R"( + Replaces all existing outputs with new ones removing all data flow that + is not required for new outputs. + + Parameters + ---------- + outputs : List[Place] + Vector with places that will become new outputs; may intersect existing outputs. + )"); + + im.def("set_element_type", + &ngraph::frontend::InputModel::set_element_type, + py::arg("place"), + py::arg("type"), + R"( + Sets new element type for a place. + + Parameters + ---------- + place : Place + Model place. + + type : ngraph.Type + New element type. + )"); +} diff --git a/ngraph/python/src/pyngraph/frontend/inputmodel.hpp b/ngraph/python/src/pyngraph/frontend/inputmodel.hpp new file mode 100644 index 00000000000..e95179c4a8c --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/inputmodel.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_pyngraph_InputModel(py::module m); diff --git a/ngraph/python/src/pyngraph/frontend/place.cpp b/ngraph/python/src/pyngraph/frontend/place.cpp new file mode 100644 index 00000000000..19bc36ef13a --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/place.cpp @@ -0,0 +1,319 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "frontend_manager.hpp" +#include "frontend_manager/frontend_exceptions.hpp" +#include "frontend_manager/frontend_manager.hpp" +#include "pyngraph/function.hpp" + +namespace py = pybind11; + +void regclass_pyngraph_Place(py::module m) +{ + py::class_> place( + m, "Place", py::dynamic_attr()); + place.doc() = "ngraph.impl.Place wraps ngraph::frontend::Place"; + + place.def("is_input", + &ngraph::frontend::Place::is_input, + R"( + Returns true if this place is input for a model. + + Returns + ---------- + is_input : bool + True if this place is input for a model + )"); + + place.def("is_output", + &ngraph::frontend::Place::is_output, + R"( + Returns true if this place is output for a model. + + Returns + ---------- + is_output : bool + True if this place is output for a model. + )"); + + place.def("get_names", + &ngraph::frontend::Place::get_names, + R"( + All associated names (synonyms) that identify this place in the graph in a framework specific way. + + Returns + ---------- + get_names : List[str] + A vector of strings each representing a name that identifies this place in the graph. + Can be empty if there are no names associated with this place or name cannot be attached. 
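+
+ Examples
+ ----------
+ A sketch, assuming the model exposes a tensor named "data":
+ place = model.get_place_by_tensor_name(tensorName="data")
+ names = place.get_names()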
+ )"); + + place.def("is_equal", + &ngraph::frontend::Place::is_equal, + py::arg("other"), + R"( + Returns true if another place is the same as this place. + + Parameters + ---------- + other : Place + Another place object. + + Returns + ---------- + is_equal : bool + True if another place is the same as this place. + )"); + + place.def("is_equal_data", + &ngraph::frontend::Place::is_equal_data, + py::arg("other"), + R"( + Returns true if another place points to the same data. + Note: The same data means all places on path: + output port -> output edge -> tensor -> input edge -> input port. + + Parameters + ---------- + other : Place + Another place object. + + Returns + ---------- + is_equal_data : bool + True if another place points to the same data. + )"); + + place.def( + "get_consuming_operations", + [](const ngraph::frontend::Place& self, py::object outputPortIndex) { + if (outputPortIndex == py::none()) + { + return self.get_consuming_operations(); + } + else + { + return self.get_consuming_operations(py::cast(outputPortIndex)); + } + }, + py::arg("outputPortIndex") = py::none(), + R"( + Returns references to all operation nodes that consume data from this place for specified output port. + Note: It can be called for any kind of graph place searching for the first consuming operations. + + Parameters + ---------- + outputPortIndex : int + If place is an operational node it specifies which output port should be considered + May not be set if node has only one output port. + + Returns + ---------- + get_consuming_operations : List[Place] + A list with all operation node references that consumes data from this place + )"); + + place.def( + "get_target_tensor", + [](const ngraph::frontend::Place& self, py::object outputPortIndex) { + if (outputPortIndex == py::none()) + { + return self.get_target_tensor(); + } + else + { + return self.get_target_tensor(py::cast(outputPortIndex)); + } + }, + py::arg("outputPortIndex") = py::none(), + R"( + Returns a tensor place that gets data from this place; applicable for operations, + output ports and output edges. + + Parameters + ---------- + outputPortIndex : int + Output port index if the current place is an operation node and has multiple output ports. + May not be set if place has only one output port. + + Returns + ---------- + get_consuming_operations : Place + A tensor place which hold the resulting value for this place. + )"); + + place.def( + "get_producing_operation", + [](const ngraph::frontend::Place& self, py::object inputPortIndex) { + if (inputPortIndex == py::none()) + { + return self.get_producing_operation(); + } + else + { + return self.get_producing_operation(py::cast(inputPortIndex)); + } + }, + py::arg("inputPortIndex") = py::none(), + R"( + Get an operation node place that immediately produces data for this place. + + Parameters + ---------- + inputPortIndex : int + If a given place is itself an operation node, this specifies a port index. + May not be set if place has only one input port. + + Returns + ---------- + get_producing_operation : Place + An operation place that produces data for this place. + )"); + + place.def("get_producing_port", + &ngraph::frontend::Place::get_producing_port, + R"( + Returns a port that produces data for this place. + + Returns + ---------- + get_producing_port : Place + A port place that produces data for this place. 
+ )"); + + place.def( + "get_input_port", + [](const ngraph::frontend::Place& self, py::object inputName, py::object inputPortIndex) { + if (inputName == py::none()) + { + if (inputPortIndex == py::none()) + { + return self.get_input_port(); + } + else + { + return self.get_input_port(py::cast(inputPortIndex)); + } + } + else + { + if (inputPortIndex == py::none()) + { + return self.get_input_port(py::cast(inputName)); + } + else + { + return self.get_input_port(py::cast(inputName), + py::cast(inputPortIndex)); + } + } + }, + py::arg("inputName") = py::none(), + py::arg("inputPortIndex") = py::none(), + R"( + For operation node returns reference to an input port with specified name and index. + + Parameters + ---------- + inputName : str + Name of port group. May not be set if node has one input port group. + + inputPortIndex : int + Input port index in a group. May not be set if node has one input port in a group. + + Returns + ---------- + get_input_port : Place + Appropriate input port place. + )"); + + place.def( + "get_output_port", + [](const ngraph::frontend::Place& self, py::object outputName, py::object outputPortIndex) { + if (outputName == py::none()) + { + if (outputPortIndex == py::none()) + { + return self.get_output_port(); + } + else + { + return self.get_output_port(py::cast(outputPortIndex)); + } + } + else + { + if (outputPortIndex == py::none()) + { + return self.get_output_port(py::cast(outputName)); + } + else + { + return self.get_output_port(py::cast(outputName), + py::cast(outputPortIndex)); + } + } + }, + py::arg("outputName") = py::none(), + py::arg("outputPortIndex") = py::none(), + R"( + For operation node returns reference to an output port with specified name and index. + + Parameters + ---------- + outputName : str + Name of output port group. May not be set if node has one output port group. + + outputPortIndex : int + Output port index. May not be set if node has one output port in a group. + + Returns + ---------- + get_output_port : Place + Appropriate output port place. + )"); + + place.def("get_consuming_ports", + &ngraph::frontend::Place::get_consuming_ports, + R"( + Returns all input ports that consume data flows through this place. + + Returns + ---------- + get_consuming_ports : List[Place] + Input ports that consume data flows through this place. + )"); + + place.def( + "get_source_tensor", + [](const ngraph::frontend::Place& self, py::object inputPortIndex) { + if (inputPortIndex == py::none()) + { + return self.get_source_tensor(); + } + else + { + return self.get_source_tensor(py::cast(inputPortIndex)); + } + }, + py::arg("inputPortIndex") = py::none(), + R"( + Returns a tensor place that supplies data for this place; applicable for operations, + input ports and input edges. + + Parameters + ---------- + inputPortIndex : int + Input port index for operational node. May not be specified if place has only one input port. + + Returns + ---------- + get_source_tensor : Place + A tensor place which supplies data for this place. 
+ )"); +} diff --git a/ngraph/python/src/pyngraph/frontend/place.hpp b/ngraph/python/src/pyngraph/frontend/place.hpp new file mode 100644 index 00000000000..1a52e21f028 --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend/place.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_pyngraph_Place(py::module m); diff --git a/ngraph/python/src/pyngraph/pyngraph.cpp b/ngraph/python/src/pyngraph/pyngraph.cpp index 92b507b6483..0849de45f58 100644 --- a/ngraph/python/src/pyngraph/pyngraph.cpp +++ b/ngraph/python/src/pyngraph/pyngraph.cpp @@ -17,6 +17,10 @@ #include "pyngraph/onnx_import/onnx_import.hpp" #endif #include "pyngraph/dimension.hpp" +#include "pyngraph/frontend/frontend.hpp" +#include "pyngraph/frontend/frontend_manager.hpp" +#include "pyngraph/frontend/inputmodel.hpp" +#include "pyngraph/frontend/place.hpp" #include "pyngraph/ops/constant.hpp" #include "pyngraph/ops/parameter.hpp" #include "pyngraph/ops/result.hpp" @@ -41,6 +45,16 @@ PYBIND11_MODULE(_pyngraph, m) regclass_pyngraph_Shape(m); regclass_pyngraph_PartialShape(m); regclass_pyngraph_Node(m); + regclass_pyngraph_Place(m); + regclass_pyngraph_InitializationFailureFrontEnd(m); + regclass_pyngraph_GeneralFailureFrontEnd(m); + regclass_pyngraph_OpConversionFailureFrontEnd(m); + regclass_pyngraph_OpValidationFailureFrontEnd(m); + regclass_pyngraph_NotImplementedFailureFrontEnd(m); + regclass_pyngraph_FEC(m); + regclass_pyngraph_FrontEndManager(m); + regclass_pyngraph_FrontEnd(m); + regclass_pyngraph_InputModel(m); regclass_pyngraph_Input(m); regclass_pyngraph_Output(m); regclass_pyngraph_NodeFactory(m); diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt b/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt new file mode 100644 index 00000000000..d39827b0f18 --- /dev/null +++ b/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt @@ -0,0 +1,22 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_FE_NAME "mock_py_ngraph_frontend") + +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) +file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) + +source_group("src" FILES ${LIBRARY_SRC}) +source_group("include" FILES ${LIBRARY_HEADERS}) + +# Create shared library +add_library(${TARGET_FE_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS}) + +target_include_directories(${TARGET_FE_NAME} PRIVATE ".") + +target_include_directories(${TARGET_FE_NAME} PRIVATE ${FRONTEND_INCLUDE_PATH} ${NGRAPH_INCLUDE_PATH}) +target_link_libraries(${TARGET_FE_NAME} PRIVATE frontend_manager) +target_link_libraries(${TARGET_FE_NAME} PUBLIC ngraph PRIVATE ngraph::builder) + +add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp new file mode 100644 index 00000000000..22a6e23a2b0 --- /dev/null +++ b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "mock_py_frontend.hpp" +#include "frontend_manager/frontend_manager.hpp" +#include "frontend_manager/frontend_manager_defs.hpp" +#include "ngraph/visibility.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +extern "C" MOCK_API 
FrontEndVersion GetAPIVersion() +{ + return OV_FRONTEND_API_VERSION; +} + +extern "C" MOCK_API void* GetFrontEndData() +{ + FrontEndPluginInfo* res = new FrontEndPluginInfo(); + res->m_name = "mock_py"; + res->m_creator = [](FrontEndCapFlags flags) { return std::make_shared(flags); }; + + return res; +} diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp new file mode 100644 index 00000000000..651e9e53809 --- /dev/null +++ b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp @@ -0,0 +1,542 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "frontend_manager/frontend_manager.hpp" +#include "frontend_manager/frontend_manager_defs.hpp" +#include "ngraph/visibility.hpp" + +// Defined if we are building the plugin DLL (instead of using it) +#ifdef mock_py_ngraph_frontend_EXPORTS +#define MOCK_API NGRAPH_HELPER_DLL_EXPORT +#else +#define MOCK_API NGRAPH_HELPER_DLL_IMPORT +#endif // mock1_ngraph_frontend_EXPORTS + +// OK to have 'using' in mock header + +using namespace ngraph; +using namespace ngraph::frontend; + +//////////////////////////////// + +struct MOCK_API PlaceStat +{ + int m_get_names = 0; + int m_get_consuming_operations = 0; + int m_get_target_tensor = 0; + int m_get_producing_operation = 0; + int m_get_producing_port = 0; + int m_get_input_port = 0; + int m_get_output_port = 0; + int m_get_consuming_ports = 0; + int m_is_input = 0; + int m_is_output = 0; + int m_is_equal = 0; + int m_is_equal_data = 0; + int m_get_source_tensor = 0; + + // Arguments tracking + std::string m_lastArgString; + int m_lastArgInt; + Place::Ptr m_lastArgPlace = nullptr; + + // Getters + int get_names() const { return m_get_names; } + int get_consuming_operations() const { return m_get_consuming_operations; } + int get_target_tensor() const { return m_get_target_tensor; } + int get_producing_operation() const { return m_get_producing_operation; } + int get_producing_port() const { return m_get_producing_port; } + int get_input_port() const { return m_get_input_port; } + int get_output_port() const { return m_get_output_port; } + int get_consuming_ports() const { return m_get_consuming_ports; } + int is_input() const { return m_is_input; } + int is_output() const { return m_is_output; } + int is_equal() const { return m_is_equal; } + int is_equal_data() const { return m_is_equal_data; } + int get_source_tensor() const { return m_get_source_tensor; } + + // Arguments getters + std::string get_lastArgString() const { return m_lastArgString; } + int get_lastArgInt() const { return m_lastArgInt; } + Place::Ptr get_lastArgPlace() const { return m_lastArgPlace; } +}; + +class MOCK_API PlaceMockPy : public Place +{ + mutable PlaceStat m_stat; + +public: + std::vector get_names() const override + { + m_stat.m_get_names++; + return {}; + } + + std::vector get_consuming_operations() const override + { + m_stat.m_get_consuming_operations++; + m_stat.m_lastArgInt = -1; + return {std::make_shared()}; + } + + std::vector get_consuming_operations(int outputPortIndex) const override + { + m_stat.m_get_consuming_operations++; + m_stat.m_lastArgInt = outputPortIndex; + return {std::make_shared()}; + } + + Place::Ptr get_target_tensor() const override + { + m_stat.m_get_target_tensor++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_target_tensor(int outputPortIndex) const override + { + 
m_stat.m_get_target_tensor++; + m_stat.m_lastArgInt = outputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_producing_operation() const override + { + m_stat.m_get_producing_operation++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_producing_operation(int inputPortIndex) const override + { + m_stat.m_get_producing_operation++; + m_stat.m_lastArgInt = inputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_producing_port() const override + { + m_stat.m_get_producing_port++; + return std::make_shared(); + } + + Place::Ptr get_input_port() const override + { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_input_port(int inputPortIndex) const override + { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_input_port(const std::string& inputName) const override + { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = inputName; + return std::make_shared(); + } + + Place::Ptr get_input_port(const std::string& inputName, int inputPortIndex) const override + { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = inputName; + return std::make_shared(); + } + + Place::Ptr get_output_port() const override + { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_output_port(int outputPortIndex) const override + { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_output_port(const std::string& outputName) const override + { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = outputName; + return std::make_shared(); + } + + Place::Ptr get_output_port(const std::string& outputName, int outputPortIndex) const override + { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = outputName; + return std::make_shared(); + } + + std::vector get_consuming_ports() const override + { + m_stat.m_get_consuming_ports++; + return {std::make_shared()}; + } + + bool is_input() const override + { + m_stat.m_is_input++; + return false; + } + + bool is_output() const override + { + m_stat.m_is_output++; + return false; + } + + bool is_equal(Ptr another) const override + { + m_stat.m_is_equal++; + m_stat.m_lastArgPlace = another; + return false; + } + + bool is_equal_data(Ptr another) const override + { + m_stat.m_is_equal_data++; + m_stat.m_lastArgPlace = another; + return false; + } + + Place::Ptr get_source_tensor(int inputPortIndex) const override + { + m_stat.m_get_source_tensor++; + m_stat.m_lastArgInt = inputPortIndex; + return {std::make_shared()}; + } + + Place::Ptr get_source_tensor() const override + { + m_stat.m_get_source_tensor++; + m_stat.m_lastArgInt = -1; + return {std::make_shared()}; + } + + //---------------Stat-------------------- + PlaceStat get_stat() const { return m_stat; } +}; + +//////////////////////////////// + +struct MOCK_API ModelStat +{ + int m_get_inputs = 0; + int m_get_outputs = 0; + int m_get_place_by_tensor_name = 0; + int m_get_place_by_operation_name = 0; + int m_get_place_by_operation_and_input_port = 0; + int m_get_place_by_operation_and_output_port = 0; + int m_set_name_for_tensor = 0; + int m_add_name_for_tensor = 0; + int m_set_name_for_operation = 0; + int m_free_name_for_tensor = 0; + int 
m_free_name_for_operation = 0; + int m_set_name_for_dimension = 0; + int m_cut_and_add_new_input = 0; + int m_cut_and_add_new_output = 0; + int m_add_output = 0; + int m_remove_output = 0; + int m_set_partial_shape = 0; + int m_get_partial_shape = 0; + int m_set_element_type = 0; + + int m_extract_subgraph = 0; + int m_override_all_inputs = 0; + int m_override_all_outputs = 0; + + // Arguments tracking + std::string m_lastArgString; + int m_lastArgInt; + Place::Ptr m_lastArgPlace = nullptr; + std::vector m_lastArgInputPlaces; + std::vector m_lastArgOutputPlaces; + ngraph::element::Type m_lastArgElementType; + ngraph::PartialShape m_lastArgPartialShape; + + // Getters + int get_inputs() const { return m_get_inputs; } + int get_outputs() const { return m_get_outputs; } + int extract_subgraph() const { return m_extract_subgraph; } + int override_all_inputs() const { return m_override_all_inputs; } + int override_all_outputs() const { return m_override_all_outputs; } + int get_place_by_tensor_name() const { return m_get_place_by_tensor_name; } + int get_place_by_operation_name() const { return m_get_place_by_operation_name; } + int get_place_by_operation_and_input_port() const + { + return m_get_place_by_operation_and_input_port; + } + int get_place_by_operation_and_output_port() const + { + return m_get_place_by_operation_and_output_port; + } + int set_name_for_tensor() const { return m_set_name_for_tensor; } + int add_name_for_tensor() const { return m_add_name_for_tensor; } + int set_name_for_operation() const { return m_set_name_for_operation; } + int free_name_for_tensor() const { return m_free_name_for_tensor; } + int free_name_for_operation() const { return m_free_name_for_operation; } + int set_name_for_dimension() const { return m_set_name_for_dimension; } + int cut_and_add_new_input() const { return m_cut_and_add_new_input; } + int cut_and_add_new_output() const { return m_cut_and_add_new_output; } + int add_output() const { return m_add_output; } + int remove_output() const { return m_remove_output; } + int set_partial_shape() const { return m_set_partial_shape; } + int get_partial_shape() const { return m_get_partial_shape; } + int set_element_type() const { return m_set_element_type; } + + // Arguments getters + std::string get_lastArgString() const { return m_lastArgString; } + int get_lastArgInt() const { return m_lastArgInt; } + Place::Ptr get_lastArgPlace() const { return m_lastArgPlace; } + std::vector get_lastArgInputPlaces() const { return m_lastArgInputPlaces; } + std::vector get_lastArgOutputPlaces() const { return m_lastArgOutputPlaces; } + ngraph::element::Type get_lastArgElementType() const { return m_lastArgElementType; } + ngraph::PartialShape get_lastArgPartialShape() const { return m_lastArgPartialShape; } +}; + +class MOCK_API InputModelMockPy : public InputModel +{ + mutable ModelStat m_stat; + +public: + std::vector get_inputs() const override + { + m_stat.m_get_inputs++; + return {std::make_shared()}; + } + + std::vector get_outputs() const override + { + m_stat.m_get_outputs++; + return {std::make_shared()}; + } + + Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const override + { + m_stat.m_get_place_by_tensor_name++; + m_stat.m_lastArgString = tensorName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_name(const std::string& operationName) override + { + m_stat.m_get_place_by_operation_name++; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + Place::Ptr 
get_place_by_operation_name_and_input_port(const std::string& operationName, + int inputPortIndex) override + { + m_stat.m_get_place_by_operation_and_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_name_and_output_port(const std::string& operationName, + int outputPortIndex) override + { + m_stat.m_get_place_by_operation_and_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + void set_name_for_tensor(Place::Ptr tensor, const std::string& newName) override + { + m_stat.m_set_name_for_tensor++; + m_stat.m_lastArgPlace = tensor; + m_stat.m_lastArgString = newName; + } + + void add_name_for_tensor(Place::Ptr tensor, const std::string& newName) override + { + m_stat.m_add_name_for_tensor++; + m_stat.m_lastArgPlace = tensor; + m_stat.m_lastArgString = newName; + } + + void set_name_for_operation(Place::Ptr operation, const std::string& newName) override + { + m_stat.m_set_name_for_operation++; + m_stat.m_lastArgPlace = operation; + m_stat.m_lastArgString = newName; + } + + void free_name_for_tensor(const std::string& name) override + { + m_stat.m_free_name_for_tensor++; + m_stat.m_lastArgString = name; + } + + void free_name_for_operation(const std::string& name) override + { + m_stat.m_free_name_for_operation++; + m_stat.m_lastArgString = name; + } + + void set_name_for_dimension(Place::Ptr place, + size_t shapeDimIndex, + const std::string& dimName) override + { + m_stat.m_set_name_for_dimension++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgInt = static_cast(shapeDimIndex); + m_stat.m_lastArgString = dimName; + } + + void cut_and_add_new_input(Place::Ptr place, const std::string& newNameOptional) override + { + m_stat.m_cut_and_add_new_input++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgString = newNameOptional; + } + + void cut_and_add_new_output(Place::Ptr place, const std::string& newNameOptional) override + { + m_stat.m_cut_and_add_new_output++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgString = newNameOptional; + } + + Place::Ptr add_output(Place::Ptr place) override + { + m_stat.m_add_output++; + m_stat.m_lastArgPlace = place; + return std::make_shared(); + } + + void remove_output(Place::Ptr place) override + { + m_stat.m_remove_output++; + m_stat.m_lastArgPlace = place; + } + + void override_all_outputs(const std::vector& outputs) override + { + m_stat.m_override_all_outputs++; + m_stat.m_lastArgOutputPlaces = outputs; + } + + void override_all_inputs(const std::vector& inputs) override + { + m_stat.m_override_all_inputs++; + m_stat.m_lastArgInputPlaces = inputs; + } + + void extract_subgraph(const std::vector& inputs, + const std::vector& outputs) override + { + m_stat.m_extract_subgraph++; + m_stat.m_lastArgInputPlaces = inputs; + m_stat.m_lastArgOutputPlaces = outputs; + } + + // Setting tensor properties + void set_partial_shape(Place::Ptr place, const ngraph::PartialShape& shape) override + { + m_stat.m_set_partial_shape++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgPartialShape = shape; + } + + ngraph::PartialShape get_partial_shape(Place::Ptr place) const override + { + m_stat.m_get_partial_shape++; + m_stat.m_lastArgPlace = place; + return {}; + } + + void set_element_type(Place::Ptr place, const ngraph::element::Type& type) override + { + m_stat.m_set_element_type++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgElementType = type; + } + + 
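+ // Every override above only counts the call and records its arguments in
+ // ModelStat; the Python tests read this via get_mdl_stat() to verify that
+ // the pybind layer forwards calls and arguments to InputModel unchanged.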
//---------------Stat-------------------- + ModelStat get_stat() const { return m_stat; } +}; + +///////////////////////////////////////////////////////// + +struct MOCK_API FeStat +{ + FrontEndCapFlags m_load_flags; + std::vector m_load_paths; + int m_convert_model = 0; + int m_convert = 0; + int m_convert_partially = 0; + int m_decode = 0; + int m_normalize = 0; + // Getters + FrontEndCapFlags load_flags() const { return m_load_flags; } + std::vector load_paths() const { return m_load_paths; } + int convert_model() const { return m_convert_model; } + int convert() const { return m_convert; } + int convert_partially() const { return m_convert_partially; } + int decode() const { return m_decode; } + int normalize() const { return m_normalize; } +}; + +class MOCK_API FrontEndMockPy : public FrontEnd +{ + mutable FeStat m_stat; + +public: + FrontEndMockPy(FrontEndCapFlags flags) { m_stat.m_load_flags = flags; } + + InputModel::Ptr load_from_file(const std::string& path) const override + { + m_stat.m_load_paths.push_back(path); + return std::make_shared(); + } + + std::shared_ptr convert(InputModel::Ptr model) const override + { + m_stat.m_convert_model++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + std::shared_ptr convert(std::shared_ptr func) const override + { + m_stat.m_convert++; + return func; + } + + std::shared_ptr convert_partially(InputModel::Ptr model) const override + { + m_stat.m_convert_partially++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + std::shared_ptr decode(InputModel::Ptr model) const override + { + m_stat.m_decode++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + void normalize(std::shared_ptr function) const override + { + m_stat.m_normalize++; + } + + FeStat get_stat() const { return m_stat; } +}; diff --git a/ngraph/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt b/ngraph/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt new file mode 100644 index 00000000000..7d2e4a3077a --- /dev/null +++ b/ngraph/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt @@ -0,0 +1,19 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_FE_NAME "mock_py_ngraph_frontend") +set(PYBIND_FE_NAME "pybind_mock_frontend") + +set(PYBIND_FE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/pyngraph_mock_frontend_api.cpp) + +source_group("src" FILES ${PYBIND_FE_SRC}) + +pybind11_add_module(${PYBIND_FE_NAME} MODULE ${PYBIND_FE_SRC}) + +target_link_libraries(${PYBIND_FE_NAME} PRIVATE ngraph::ngraph ngraph::frontend_manager) +target_link_libraries(${PYBIND_FE_NAME} PRIVATE ${TARGET_FE_NAME}) + +add_dependencies(${PYBIND_FE_NAME} ${TARGET_FE_NAME}) + +add_clang_format_target(${PYBIND_FE_NAME}_clang FOR_TARGETS ${PYBIND_FE_NAME}) diff --git a/ngraph/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp b/ngraph/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp new file mode 100644 index 00000000000..ec87842d417 --- /dev/null +++ b/ngraph/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp @@ -0,0 +1,136 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "../mock_py_ngraph_frontend/mock_py_frontend.hpp" + +namespace py = pybind11; +using namespace ngraph; +using namespace ngraph::frontend; + +static void register_mock_frontend_stat(py::module m) +{ + m.def( + "get_fe_stat", + [](const std::shared_ptr& fe) { + std::shared_ptr ptr = std::dynamic_pointer_cast(fe); + 
if (ptr) + { + auto stat = ptr->get_stat(); + return stat; + } + return FeStat(); + }, + py::arg("frontend")); + + py::class_ feStat(m, "FeStat", py::dynamic_attr()); + feStat.def_property_readonly("load_flags", &FeStat::load_flags); + feStat.def_property_readonly("load_paths", &FeStat::load_paths); + feStat.def_property_readonly("convert_model", &FeStat::convert_model); + feStat.def_property_readonly("convert", &FeStat::convert); + feStat.def_property_readonly("convert_partially", &FeStat::convert_partially); + feStat.def_property_readonly("decode", &FeStat::decode); + feStat.def_property_readonly("normalize", &FeStat::normalize); +} + +static void register_mock_model_stat(py::module m) +{ + m.def( + "get_mdl_stat", + [](const std::shared_ptr& mdl) { + std::shared_ptr ptr = + std::dynamic_pointer_cast(mdl); + if (ptr) + { + auto stat = ptr->get_stat(); + return stat; + } + return ModelStat(); + }, + py::arg("model")); + + py::class_ mdlStat(m, "ModelStat", py::dynamic_attr()); + mdlStat.def_property_readonly("get_inputs", &ModelStat::get_inputs); + mdlStat.def_property_readonly("get_outputs", &ModelStat::get_outputs); + mdlStat.def_property_readonly("get_place_by_tensor_name", &ModelStat::get_place_by_tensor_name); + mdlStat.def_property_readonly("get_place_by_operation_name", + &ModelStat::get_place_by_operation_name); + mdlStat.def_property_readonly("get_place_by_operation_and_input_port", + &ModelStat::get_place_by_operation_and_input_port); + mdlStat.def_property_readonly("get_place_by_operation_and_output_port", + &ModelStat::get_place_by_operation_and_output_port); + + mdlStat.def_property_readonly("set_name_for_tensor", &ModelStat::set_name_for_tensor); + mdlStat.def_property_readonly("add_name_for_tensor", &ModelStat::add_name_for_tensor); + mdlStat.def_property_readonly("set_name_for_operation", &ModelStat::set_name_for_operation); + mdlStat.def_property_readonly("free_name_for_tensor", &ModelStat::free_name_for_tensor); + mdlStat.def_property_readonly("free_name_for_operation", &ModelStat::free_name_for_operation); + mdlStat.def_property_readonly("set_name_for_dimension", &ModelStat::set_name_for_dimension); + mdlStat.def_property_readonly("cut_and_add_new_input", &ModelStat::cut_and_add_new_input); + mdlStat.def_property_readonly("cut_and_add_new_output", &ModelStat::cut_and_add_new_output); + mdlStat.def_property_readonly("add_output", &ModelStat::add_output); + mdlStat.def_property_readonly("remove_output", &ModelStat::remove_output); + mdlStat.def_property_readonly("set_partial_shape", &ModelStat::set_partial_shape); + mdlStat.def_property_readonly("get_partial_shape", &ModelStat::get_partial_shape); + mdlStat.def_property_readonly("set_element_type", &ModelStat::set_element_type); + mdlStat.def_property_readonly("extract_subgraph", &ModelStat::extract_subgraph); + mdlStat.def_property_readonly("override_all_inputs", &ModelStat::override_all_inputs); + mdlStat.def_property_readonly("override_all_outputs", &ModelStat::override_all_outputs); + + // Arguments tracking + mdlStat.def_property_readonly("lastArgString", &ModelStat::get_lastArgString); + mdlStat.def_property_readonly("lastArgInt", &ModelStat::get_lastArgInt); + mdlStat.def_property_readonly("lastArgPlace", &ModelStat::get_lastArgPlace); + mdlStat.def_property_readonly("lastArgInputPlaces", &ModelStat::get_lastArgInputPlaces); + mdlStat.def_property_readonly("lastArgOutputPlaces", &ModelStat::get_lastArgOutputPlaces); + mdlStat.def_property_readonly("lastArgElementType", &ModelStat::get_lastArgElementType); + 
mdlStat.def_property_readonly("lastArgPartialShape", &ModelStat::get_lastArgPartialShape); +} + +static void register_mock_place_stat(py::module m) +{ + m.def( + "get_place_stat", + [](const Place::Ptr& fe) { + std::shared_ptr ptr = std::dynamic_pointer_cast(fe); + if (ptr) + { + auto stat = ptr->get_stat(); + return stat; + } + return PlaceStat(); + }, + py::arg("place")); + + py::class_ placeStat(m, "PlaceStat", py::dynamic_attr()); + + placeStat.def_property_readonly("lastArgString", &PlaceStat::get_lastArgString); + placeStat.def_property_readonly("lastArgInt", &PlaceStat::get_lastArgInt); + placeStat.def_property_readonly("lastArgPlace", &PlaceStat::get_lastArgPlace); + + placeStat.def_property_readonly("get_names", &PlaceStat::get_names); + placeStat.def_property_readonly("get_consuming_operations", + &PlaceStat::get_consuming_operations); + placeStat.def_property_readonly("get_target_tensor", &PlaceStat::get_target_tensor); + placeStat.def_property_readonly("get_producing_operation", &PlaceStat::get_producing_operation); + placeStat.def_property_readonly("get_producing_port", &PlaceStat::get_producing_port); + placeStat.def_property_readonly("get_input_port", &PlaceStat::get_input_port); + placeStat.def_property_readonly("get_output_port", &PlaceStat::get_output_port); + placeStat.def_property_readonly("get_consuming_ports", &PlaceStat::get_consuming_ports); + placeStat.def_property_readonly("is_input", &PlaceStat::is_input); + placeStat.def_property_readonly("is_output", &PlaceStat::is_output); + placeStat.def_property_readonly("is_equal", &PlaceStat::is_equal); + placeStat.def_property_readonly("is_equal_data", &PlaceStat::is_equal_data); + placeStat.def_property_readonly("get_source_tensor", &PlaceStat::get_source_tensor); +} + +PYBIND11_MODULE(pybind_mock_frontend, m) +{ + m.doc() = "Mock frontend call counters for testing Pyngraph frontend bindings"; + register_mock_frontend_stat(m); + register_mock_model_stat(m); + register_mock_place_stat(m); +} diff --git a/ngraph/python/tests/test_ngraph/test_frontendmanager.py b/ngraph/python/tests/test_ngraph/test_frontendmanager.py new file mode 100644 index 00000000000..6c3d080657c --- /dev/null +++ b/ngraph/python/tests/test_ngraph/test_frontendmanager.py @@ -0,0 +1,545 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from ngraph import PartialShape +from ngraph.frontend import FrontEndCapabilities, FrontEndManager, InitializationFailure +from ngraph.utils.types import get_element_type + +mock_available = True +try: + from pybind_mock_frontend import get_fe_stat, get_mdl_stat, get_place_stat +except Exception: + print("No mock frontend available") + mock_available = False + +# FrontEndManager shall be initialized and destroyed after all tests finished +# This is because destroy of FrontEndManager will unload all plugins, no objects shall exist after this +fem = FrontEndManager() + +mock_needed = pytest.mark.skipif(not mock_available, + reason="mock fe is not available") + + +# ---------- FrontEnd tests --------------- +@mock_needed +def test_load_by_framework_caps(): + frontEnds = fem.get_available_front_ends() + assert frontEnds is not None + assert "mock_py" in frontEnds + caps = [FrontEndCapabilities.DEFAULT, + FrontEndCapabilities.CUT, + FrontEndCapabilities.NAMES, + FrontEndCapabilities.WILDCARDS, + FrontEndCapabilities.CUT | FrontEndCapabilities.NAMES | FrontEndCapabilities.WILDCARDS] + for cap in caps: + fe = fem.load_by_framework(framework="mock_py", 
+        stat = get_fe_stat(fe)
+        assert cap == stat.load_flags
+    for i in range(len(caps) - 1):
+        for j in range(i + 1, len(caps)):
+            assert caps[i] != caps[j]
+
+
+def test_load_by_unknown_framework():
+    frontEnds = fem.get_available_front_ends()
+    assert not("UnknownFramework" in frontEnds)
+    try:
+        fem.load_by_framework("UnknownFramework")
+    except InitializationFailure as exc:
+        print(exc)
+    else:
+        raise AssertionError("Unexpected exception.")
+
+
+@mock_needed
+def test_load_from_file():
+    fe = fem.load_by_framework(framework="mock_py")
+    assert fe is not None
+    model = fe.load_from_file("abc.bin")
+    assert model is not None
+    stat = get_fe_stat(fe)
+    assert "abc.bin" in stat.load_paths
+
+
+@mock_needed
+def test_convert_model():
+    fe = fem.load_by_framework(framework="mock_py")
+    assert fe is not None
+    model = fe.load_from_file(path="")
+    func = fe.convert(model=model)
+    assert func is not None
+    stat = get_fe_stat(fe)
+    assert stat.convert_model == 1
+
+
+@mock_needed
+def test_convert_partially():
+    fe = fem.load_by_framework(framework="mock_py")
+    assert fe is not None
+    model = fe.load_from_file(path="")
+    func = fe.convert_partially(model=model)
+    stat = get_fe_stat(fe)
+    assert stat.convert_partially == 1
+    fe.convert(function=func)
+    stat = get_fe_stat(fe)
+    assert stat.convert == 1
+
+
+@mock_needed
+def test_decode_and_normalize():
+    fe = fem.load_by_framework(framework="mock_py")
+    assert fe is not None
+    model = fe.load_from_file(path="")
+    func = fe.decode(model=model)
+    stat = get_fe_stat(fe)
+    assert stat.decode == 1
+    fe.normalize(function=func)
+    stat = get_fe_stat(fe)
+    assert stat.normalize == 1
+    assert stat.decode == 1
+
+
+# --------InputModel tests-----------------
+@mock_needed
+def init_model():
+    fe = fem.load_by_framework(framework="mock_py")
+    model = fe.load_from_file(path="")
+    return model
+
+
+@mock_needed
+def test_model_get_inputs():
+    model = init_model()
+    for i in range(1, 10):
+        model.get_inputs()
+        stat = get_mdl_stat(model)
+        assert stat.get_inputs == i
+
+
+@mock_needed
+def test_model_get_outputs():
+    model = init_model()
+    for i in range(1, 10):
+        model.get_outputs()
+        stat = get_mdl_stat(model)
+        assert stat.get_outputs == i
+
+
+@mock_needed
+def test_model_get_place_by_tensor_name():
+    model = init_model()
+    for i in range(1, 10):
+        name = str(i)
+        model.get_place_by_tensor_name(tensorName=name)
+        stat = get_mdl_stat(model)
+        assert stat.get_place_by_tensor_name == i
+        assert stat.lastArgString == name
+
+
+@mock_needed
+def test_model_get_place_by_operation_name():
+    model = init_model()
+    for i in range(1, 10):
+        name = str(i)
+        model.get_place_by_operation_name(operationName=name)
+        stat = get_mdl_stat(model)
+        assert stat.get_place_by_operation_name == i
+        assert stat.lastArgString == name
+
+
+@mock_needed
+def test_model_get_place_by_operation_name_and_input_port():
+    model = init_model()
+    for i in range(1, 10):
+        name = str(i)
+        model.get_place_by_operation_name_and_input_port(operationName=name, inputPortIndex=i * 2)
+        stat = get_mdl_stat(model)
+        assert stat.get_place_by_operation_and_input_port == i
+        assert stat.lastArgString == name
+        assert stat.lastArgInt == i * 2
+
+
+@mock_needed
+def test_model_get_place_by_operation_name_and_output_port():
+    model = init_model()
+    for i in range(1, 10):
+        name = str(i)
+        model.get_place_by_operation_name_and_output_port(operationName=name, outputPortIndex=i * 2)
+        stat = get_mdl_stat(model)
+        assert stat.get_place_by_operation_and_output_port == i
+        assert stat.lastArgString == name
+        assert stat.lastArgInt == i * 2
+
+
+@mock_needed
+def test_model_set_name_for_tensor():
+    model = init_model()
+    place = model.get_place_by_tensor_name(tensorName="")
+    model.set_name_for_tensor(tensor=place, newName="1234")
+    stat = get_mdl_stat(model)
+    assert stat.set_name_for_tensor == 1
+    assert stat.lastArgString == "1234"
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_add_name_for_tensor():
+    model = init_model()
+    place = model.get_place_by_tensor_name(tensorName="")
+    model.add_name_for_tensor(tensor=place, newName="1234")
+    stat = get_mdl_stat(model)
+    assert stat.add_name_for_tensor == 1
+    assert stat.lastArgString == "1234"
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_set_name_for_operation():
+    model = init_model()
+    place = model.get_place_by_operation_name(operationName="")
+    model.set_name_for_operation(operation=place, newName="1111")
+    stat = get_mdl_stat(model)
+    assert stat.set_name_for_operation == 1
+    assert stat.lastArgString == "1111"
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_free_name_for_tensor():
+    model = init_model()
+    model.free_name_for_tensor(name="2222")
+    stat = get_mdl_stat(model)
+    assert stat.free_name_for_tensor == 1
+    assert stat.lastArgString == "2222"
+
+
+@mock_needed
+def test_model_free_name_for_operation():
+    model = init_model()
+    model.free_name_for_operation(name="3333")
+    stat = get_mdl_stat(model)
+    assert stat.free_name_for_operation == 1
+    assert stat.lastArgString == "3333"
+
+
+@mock_needed
+def test_model_set_name_for_dimension():
+    model = init_model()
+    place = model.get_place_by_operation_name(operationName="")
+    model.set_name_for_dimension(place=place, dimIndex=123, dimName="4444")
+    stat = get_mdl_stat(model)
+    assert stat.set_name_for_dimension == 1
+    assert stat.lastArgString == "4444"
+    assert stat.lastArgInt == 123
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_cut_and_add_new_input():
+    model = init_model()
+    place = model.get_place_by_operation_name("")
+    model.cut_and_add_new_input(place=place, newName="5555")
+    stat = get_mdl_stat(model)
+    assert stat.cut_and_add_new_input == 1
+    assert stat.lastArgString == "5555"
+    assert stat.lastArgPlace == place
+    model.cut_and_add_new_input(place=place)
+    stat = get_mdl_stat(model)
+    assert stat.cut_and_add_new_input == 2
+    assert stat.lastArgString == ""
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_cut_and_add_new_output():
+    model = init_model()
+    place = model.get_place_by_operation_name("")
+    model.cut_and_add_new_output(place=place, newName="5555")
+    stat = get_mdl_stat(model)
+    assert stat.cut_and_add_new_output == 1
+    assert stat.lastArgString == "5555"
+    assert stat.lastArgPlace == place
+    model.cut_and_add_new_output(place=place)
+    stat = get_mdl_stat(model)
+    assert stat.cut_and_add_new_output == 2
+    assert stat.lastArgString == ""
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_add_output():
+    model = init_model()
+    place = model.get_place_by_operation_name("")
+    place2 = model.add_output(place=place)
+    assert place2 is not None
+    stat = get_mdl_stat(model)
+    assert stat.add_output == 1
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_remove_output():
+    model = init_model()
+    place = model.get_place_by_operation_name("")
+    model.remove_output(place=place)
+    stat = get_mdl_stat(model)
+    assert stat.remove_output == 1
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_set_partial_shape():
+    model = init_model()
+    place = model.get_place_by_tensor_name(tensorName="")
+    test_shape = PartialShape([1, 2, 3, 4])
+    model.set_partial_shape(place=place, shape=test_shape)
+    stat = get_mdl_stat(model)
+    assert stat.set_partial_shape == 1
+    assert stat.lastArgPlace == place
+    assert stat.lastArgPartialShape == test_shape
+
+
+@mock_needed
+def test_model_get_partial_shape():
+    model = init_model()
+    place = model.get_place_by_tensor_name(tensorName="")
+    shape = model.get_partial_shape(place=place)
+    assert shape is not None
+    stat = get_mdl_stat(model)
+    assert stat.get_partial_shape == 1
+    assert stat.lastArgPlace == place
+
+
+@mock_needed
+def test_model_override_all_inputs():
+    model = init_model()
+    place1 = model.get_place_by_tensor_name(tensorName="p1")
+    place2 = model.get_place_by_tensor_name(tensorName="p2")
+    model.override_all_inputs(inputs=[place1, place2])
+    stat = get_mdl_stat(model)
+    assert stat.override_all_inputs == 1
+    assert len(stat.lastArgInputPlaces) == 2
+    assert stat.lastArgInputPlaces[0] == place1
+    assert stat.lastArgInputPlaces[1] == place2
+
+
+@mock_needed
+def test_model_override_all_outputs():
+    model = init_model()
+    place1 = model.get_place_by_tensor_name(tensorName="p1")
+    place2 = model.get_place_by_tensor_name(tensorName="p2")
+    model.override_all_outputs(outputs=[place1, place2])
+    stat = get_mdl_stat(model)
+    assert stat.override_all_outputs == 1
+    assert len(stat.lastArgOutputPlaces) == 2
+    assert stat.lastArgOutputPlaces[0] == place1
+    assert stat.lastArgOutputPlaces[1] == place2
+
+
+@mock_needed
+def test_model_extract_subgraph():
+    model = init_model()
+    place1 = model.get_place_by_tensor_name(tensorName="p1")
+    place2 = model.get_place_by_tensor_name(tensorName="p2")
+    place3 = model.get_place_by_tensor_name(tensorName="p3")
+    place4 = model.get_place_by_tensor_name(tensorName="p4")
+    model.extract_subgraph(inputs=[place1, place2], outputs=[place3, place4])
+    stat = get_mdl_stat(model)
+    assert stat.extract_subgraph == 1
+    assert len(stat.lastArgInputPlaces) == 2
+    assert stat.lastArgInputPlaces[0] == place1
+    assert stat.lastArgInputPlaces[1] == place2
+    assert len(stat.lastArgOutputPlaces) == 2
+    assert stat.lastArgOutputPlaces[0] == place3
+    assert stat.lastArgOutputPlaces[1] == place4
+
+
+@mock_needed
+def test_model_set_element_type():
+    model = init_model()
+    place = model.get_place_by_tensor_name(tensorName="")
+    model.set_element_type(place=place, type=get_element_type(np.int32))
+    stat = get_mdl_stat(model)
+    assert stat.set_element_type == 1
+    assert stat.lastArgPlace == place
+    assert stat.lastArgElementType == get_element_type(np.int32)
+
+
+# ----------- Place test ------------
+@mock_needed
+def init_place():
+    fe = fem.load_by_framework(framework="mock_py")
+    model = fe.load_from_file(path="")
+    place = model.get_place_by_tensor_name(tensorName="")
+    return model, place
+
+
+@mock_needed
+def test_place_is_input():
+    _, place = init_place()
+    assert place.is_input() is not None
+    stat = get_place_stat(place)
+    assert stat.is_input == 1
+
+
+@mock_needed
+def test_place_is_output():
+    _, place = init_place()
+    assert place.is_output() is not None
+    stat = get_place_stat(place)
+    assert stat.is_output == 1
+
+
+@mock_needed
+def test_place_get_names():
+    _, place = init_place()
+    assert place.get_names() is not None
+    stat = get_place_stat(place)
+    assert stat.get_names == 1
+
+
+@mock_needed
+def test_place_is_equal():
+    model, place = init_place()
+    place2 = model.get_place_by_tensor_name("2")
+    assert place.is_equal(other=place2) is not None
+    stat = get_place_stat(place)
+    assert stat.is_equal == 1
+    assert stat.lastArgPlace == place2
+
+
+@mock_needed
+def test_place_is_equal_data():
+    model, place = init_place()
+    place2 = model.get_place_by_tensor_name("2")
+    assert place.is_equal_data(other=place2) is not None
+    stat = get_place_stat(place)
+    assert stat.is_equal_data == 1
+    assert stat.lastArgPlace == place2
+
+
+@mock_needed
+def test_place_get_consuming_operations():
+    _, place = init_place()
+    assert place.get_consuming_operations(outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_consuming_operations == 1
+    assert stat.lastArgInt == 22
+    assert place.get_consuming_operations() is not None
+    stat = get_place_stat(place)
+    assert stat.get_consuming_operations == 2
+    assert stat.lastArgInt == -1
+
+
+@mock_needed
+def test_place_get_target_tensor():
+    _, place = init_place()
+    assert place.get_target_tensor(outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_target_tensor == 1
+    assert stat.lastArgInt == 22
+    assert place.get_target_tensor() is not None
+    stat = get_place_stat(place)
+    assert stat.get_target_tensor == 2
+    assert stat.lastArgInt == -1
+
+
+@mock_needed
+def test_place_get_producing_operation():
+    _, place = init_place()
+    assert place.get_producing_operation(inputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_producing_operation == 1
+    assert stat.lastArgInt == 22
+    assert place.get_producing_operation() is not None
+    stat = get_place_stat(place)
+    assert stat.get_producing_operation == 2
+    assert stat.lastArgInt == -1
+
+
+@mock_needed
+def test_place_get_producing_port():
+    _, place = init_place()
+    assert place.get_producing_port() is not None
+    stat = get_place_stat(place)
+    assert stat.get_producing_port == 1
+
+
+@mock_needed
+def test_place_get_input_port():
+    _, place = init_place()
+    assert place.get_input_port() is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 1
+    assert stat.lastArgInt == -1
+    assert place.get_input_port(inputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 2
+    assert stat.lastArgInt == 22
+
+
+@mock_needed
+def test_place_get_input_port2():
+    _, place = init_place()
+    assert place.get_input_port(inputName="abc") is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 1
+    assert stat.lastArgInt == -1
+    assert stat.lastArgString == "abc"
+    assert place.get_input_port(inputName="abcd", inputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 2
+    assert stat.lastArgInt == 22
+    assert stat.lastArgString == "abcd"
+
+
+@mock_needed
+def test_place_get_output_port():
+    _, place = init_place()
+    assert place.get_output_port() is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 1
+    assert stat.lastArgInt == -1
+    assert place.get_output_port(outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 2
+    assert stat.lastArgInt == 22
+
+
+@mock_needed
+def test_place_get_output_port2():
+    _, place = init_place()
+    assert place.get_output_port(outputName="abc") is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 1
+    assert stat.lastArgInt == -1
+    assert stat.lastArgString == "abc"
+    assert place.get_output_port(outputName="abcd", outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 2
+    assert stat.lastArgInt == 22
+    assert stat.lastArgString == "abcd"
+
+
+@mock_needed
+def test_place_get_consuming_ports():
+    _, place = init_place()
+    assert place.get_consuming_ports() is not None
+    stat = get_place_stat(place)
+    assert stat.get_consuming_ports == 1
+
+
+@mock_needed
+def test_place_get_source_tensor():
+    _, place = init_place()
+    assert place.get_source_tensor() is not None
+    stat = get_place_stat(place)
+    assert stat.get_source_tensor == 1
+    assert stat.lastArgInt == -1
+    assert place.get_source_tensor(inputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_source_tensor == 2
+    assert stat.lastArgInt == 22
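
For reference, the bindings exercised above compose end to end roughly as in the sketch below. This is illustrative only, not part of the change: it reuses the "mock_py" test frontend registered by the mock plugin, and "abc.bin" is the same placeholder path used in the tests, not a real model file.

# Minimal usage sketch of the new ngraph.frontend Python API (assumes the
# mock_py test frontend built above is discoverable by the FrontEndManager).
from ngraph.frontend import FrontEndManager

fem = FrontEndManager()
print(fem.get_available_front_ends())           # e.g. ["mock_py", ...]
fe = fem.load_by_framework(framework="mock_py")
model = fe.load_from_file("abc.bin")            # placeholder path, as in the tests
func = fe.convert(model=model)                  # returns a fully converted nGraph Function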