From 86c4489aca2ea9c7abe2362113d792151cc42d84 Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Fri, 24 Mar 2023 10:11:12 +0100
Subject: [PATCH] [PT FE] Add telemetry extension support (#16438)

* Initial telemetry introduction in PyTorch frontend

* Add test

* remove obsolete checks from test

* Move statistics gathering into TranslateSession

* Fix code style

* Fix codestyle
---
 .../openvino/frontend/pytorch/frontend.hpp    |  2 +
 src/frontends/pytorch/src/frontend.cpp        | 11 ++++--
 src/frontends/pytorch/src/input_model.cpp     |  2 +-
 src/frontends/pytorch/src/input_model.hpp     |  3 +-
 .../pytorch/src/translate_session.cpp         | 17 ++++++++-
 .../pytorch/src/translate_session.hpp         |  9 ++++-
 .../py_frontend_tests/test_torch_frontend.py  | 38 +++++++++++++++++++
 7 files changed, 71 insertions(+), 11 deletions(-)

diff --git a/src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp b/src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp
index 9bd62ada8ff..8100592e797 100644
--- a/src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp
+++ b/src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include "openvino/frontend/extension/telemetry.hpp"
 #include "openvino/frontend/frontend.hpp"
 #include "openvino/frontend/pytorch/node_context.hpp"
 #include "openvino/frontend/pytorch/visibility.hpp"
@@ -61,6 +62,7 @@ protected:
     ov::frontend::InputModel::Ptr load_impl(const std::vector<ov::Any>& variants) const override;
 
     std::map<std::string, CreatorFunction> m_op_translators;
+    TelemetryExtension::Ptr m_telemetry;
 };
 
 }  // namespace pytorch
diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp
index 685b14c157d..45a7ef2f2b5 100644
--- a/src/frontends/pytorch/src/frontend.cpp
+++ b/src/frontends/pytorch/src/frontend.cpp
@@ -61,6 +61,9 @@ std::shared_ptr<Model> FrontEnd::convert(const InputModel::Ptr& model) const {
     std::set<std::string> unconverted_ops_types = get_unconverted_types_from_model(converted_model);
     std::stringstream ops_str;
     for (auto&& op_type : unconverted_ops_types) {
+        if (m_telemetry) {
+            m_telemetry->send_event("error_cause", "pytorch_" + op_type);
+        }
         ops_str << op_type << '\n';
     }
     FRONT_END_OP_CONVERSION_CHECK(unconverted_ops_types.size() == 0,
@@ -75,7 +78,7 @@ void FrontEnd::convert(const std::shared_ptr<Model>& partiallyConverted) const {
 std::shared_ptr<Model> FrontEnd::convert_partially(const ov::frontend::InputModel::Ptr& model) const {
     FRONT_END_GENERAL_CHECK(std::dynamic_pointer_cast<pytorch::InputModel>(model), "Invalid input model");
     try {
-        TranslateSession translate_session(model, m_op_translators);
+        TranslateSession translate_session(model, m_op_translators, m_telemetry);
         return translate_session.get_converted_model();
     } catch (const std::runtime_error& e) {
         std::cerr << "[ ERROR ] Unexpected error while converting pytorch model: " << e.what() << '\n';
@@ -132,9 +135,9 @@ void FrontEnd::normalize(const std::shared_ptr<ov::Model>& model) const {
 }
 
 void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
-    // Extension loading mechanism is not implemented, any extensions will be ignored
-    // see CVS-98766 for tracking progress
-    return;
+    if (const auto& telemetry = std::dynamic_pointer_cast<TelemetryExtension>(extension)) {
+        m_telemetry = telemetry;
+    }
 }
 
 bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
diff --git a/src/frontends/pytorch/src/input_model.cpp b/src/frontends/pytorch/src/input_model.cpp
index d273b091b8d..ae4d15f6c43 100644
--- a/src/frontends/pytorch/src/input_model.cpp
+++ b/src/frontends/pytorch/src/input_model.cpp
@@ -11,7 +11,7 @@ namespace ov {
 namespace frontend {
 namespace pytorch {
 
-InputModel::InputModel(std::shared_ptr<TorchDecoder> model_decoder) : m_model_decoder(model_decoder) {
+InputModel::InputModel(const std::shared_ptr<TorchDecoder>& model_decoder) : m_model_decoder(model_decoder) {
     const auto& inputs = m_model_decoder->inputs();
     for (size_t i = 0; i < inputs.size(); ++i) {
         auto in_place = std::make_shared<pytorch::Place>(*this, inputs[i]);
diff --git a/src/frontends/pytorch/src/input_model.hpp b/src/frontends/pytorch/src/input_model.hpp
index 14ede27c23e..c4517129a4b 100644
--- a/src/frontends/pytorch/src/input_model.hpp
+++ b/src/frontends/pytorch/src/input_model.hpp
@@ -31,8 +31,7 @@ class InputModel : public ov::frontend::InputModel {
     friend class ::ov::frontend::pytorch::Place;
 
 public:
-    // TODO: pass telemetry extension to this ctor
-    explicit InputModel(std::shared_ptr<TorchDecoder> model_decoder);
+    explicit InputModel(const std::shared_ptr<TorchDecoder>& model_decoder);
 
     std::vector<ov::frontend::Place::Ptr> get_inputs() const override;
     std::vector<ov::frontend::Place::Ptr> get_outputs() const override;
diff --git a/src/frontends/pytorch/src/translate_session.cpp b/src/frontends/pytorch/src/translate_session.cpp
index 376b466c0a2..89e19a1609e 100644
--- a/src/frontends/pytorch/src/translate_session.cpp
+++ b/src/frontends/pytorch/src/translate_session.cpp
@@ -20,11 +20,22 @@ namespace pytorch {
 using namespace ov::op;
 
 TranslateSession::TranslateSession(const ov::frontend::InputModel::Ptr& input_model,
-                                   const std::map<std::string, CreatorFunction>& translator_map)
+                                   const std::map<std::string, CreatorFunction>& translator_map,
+                                   const std::shared_ptr<TelemetryExtension>& telemetry)
     : m_input_model(input_model),
       m_translator_map(translator_map),
+      m_telemetry(telemetry),
       m_ov_model(nullptr) {}
 
+TranslateSession::~TranslateSession() {
+    if (m_telemetry) {
+        // Send statistics
+        for (const auto& op : m_op_statistics) {
+            m_telemetry->send_event("op_count", "pytorch_" + op.first, static_cast<int>(op.second));
+        }
+    }
+}
+
 std::shared_ptr<Model> TranslateSession::get_converted_model() {
     if (m_ov_model) {
         return m_ov_model;
@@ -118,13 +129,15 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
             }
         }
         auto context = NodeContext(node, external_tensor_map, tensor_map, parameters, mutated_tensors, this);
+        // Add op type to the statistics
+        m_op_statistics[context.get_op_type()]++;
         auto converted_outputs = convert_node(context);
 
         auto fw_outputs = node->outputs();
         // Ops with subgraphs or with mutated inputs may have more outputs after conversion compared to pytorch ones
         FRONT_END_OP_CONVERSION_CHECK(fw_outputs.size() <= converted_outputs.size(),
                                       "Number of ",
-                                      node->get_op_type(),
+                                      context.get_op_type(),
                                       " outputs greater than number of converted outputs.");
 
         // TODO: Make sure that mapping of fw_outputs to converted_outputs does always work
diff --git a/src/frontends/pytorch/src/translate_session.hpp b/src/frontends/pytorch/src/translate_session.hpp
index 4931c274984..939cba7d1bd 100644
--- a/src/frontends/pytorch/src/translate_session.hpp
+++ b/src/frontends/pytorch/src/translate_session.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "input_model.hpp"
+#include "openvino/frontend/extension/telemetry.hpp"
 #include "openvino/frontend/pytorch/node_context.hpp"
 
 namespace ov {
@@ -17,7 +18,9 @@ namespace pytorch {
 class TranslateSession {
 public:
     TranslateSession(const frontend::InputModel::Ptr& input_model,
-                     const std::map<std::string, CreatorFunction>& translator_map);
+                     const std::map<std::string, CreatorFunction>& translator_map,
+                     const std::shared_ptr<TelemetryExtension>& telemetry);
+    ~TranslateSession();
 
     std::shared_ptr<Model> get_converted_model();
     std::shared_ptr<Model> translate_graph(const frontend::InputModel::Ptr& input_model);
@@ -42,9 +45,11 @@ private:
     const frontend::InputModel::Ptr m_input_model;
     const std::map<std::string, CreatorFunction>& m_translator_map;
-
+    std::shared_ptr<TelemetryExtension> m_telemetry;
     std::shared_ptr<Model> m_ov_model;
+    std::map<size_t, std::pair<size_t, Output<Node>>> m_counter_map;
+    std::map<std::string, uint64_t> m_op_statistics;
 };
 
 }  // namespace pytorch
diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py
index 7c6a25dc7e2..7be15ba9cd3 100644
--- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py
+++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py
@@ -65,3 +65,41 @@ def test_pytorch_fe_set_input_value():
     im.set_tensor_value(place, np.random.randn(1, 2, 3, 4).astype(np.float32))
     om = fe.convert(im)
     assert len(om.get_parameters()) == 0
+
+
+def test_pytorch_telemetry():
+    from openvino.frontend import TelemetryExtension
+    from openvino.frontend.pytorch.decoder import TorchScriptPythonDecoder
+
+    class MockTelemetry:
+        def __init__(self, stat):
+            self.stat = stat
+
+        def send_event(self, *arg, **kwargs):
+            self.stat["send_event"] += 1
+
+        def send_error(self, *arg, **kwargs):
+            self.stat["send_error"] += 1
+
+        def send_stack_trace(self, *arg, **kwargs):
+            self.stat["send_stack_trace"] += 1
+
+    def add_ext(front_end, stat):
+        tel = MockTelemetry(stat)
+        front_end.add_extension(TelemetryExtension("mock",
+                                                   tel.send_event,
+                                                   tel.send_error,
+                                                   tel.send_stack_trace))
+
+    tel_stat = {"send_event": 0, "send_error": 0, "send_stack_trace": 0}
+    # Ensure that the MockTelemetry object stays alive and can receive events (the callbacks hold the object)
+    model = get_scripted_model(aten_relu())
+    decoder = TorchScriptPythonDecoder(model)
+    fe_manager = FrontEndManager()
+    fe = fe_manager.load_by_framework("pytorch")
+    add_ext(fe, tel_stat)
+    im = fe.load(decoder)
+    fe.convert(im)
+    assert tel_stat["send_event"] == 2
+    assert tel_stat["send_error"] == 0
+    assert tel_stat["send_stack_trace"] == 0
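
Note (not part of the patch): the test above exercises the new hooks from Python; below is a minimal C++ sketch of the same client-side flow. It attaches a TelemetryExtension to the PyTorch frontend and prints what the frontend reports — with this change, one "op_count" event per translated op type (flushed when the TranslateSession is destroyed) and an "error_cause" event per op type that failed to convert. The TelemetryExtension constructor and callback signatures follow openvino/frontend/extension/telemetry.hpp; the "sketch" category name and the printing callbacks are illustrative assumptions.

#include <iostream>
#include <memory>
#include <string>

#include "openvino/frontend/extension/telemetry.hpp"
#include "openvino/frontend/manager.hpp"

int main() {
    ov::frontend::FrontEndManager manager;
    auto fe = manager.load_by_framework("pytorch");
    if (!fe) {
        return 1;  // PyTorch frontend not available in this build
    }

    // Each callback simply prints the report. The frontend invokes send_event
    // for op statistics and conversion-failure causes; send_error and
    // send_stack_trace are part of the extension's interface as well.
    auto telemetry = std::make_shared<ov::frontend::TelemetryExtension>(
        "sketch",  // event category; an assumed, illustrative name
        [](const std::string& category, const std::string& action, const std::string& label, int value) {
            std::cout << "[event] " << category << ' ' << action << ' ' << label << ' ' << value << '\n';
        },
        [](const std::string& category, const std::string& error_message) {
            std::cout << "[error] " << category << ' ' << error_message << '\n';
        },
        [](const std::string& category, const std::string& stack_trace) {
            std::cout << "[trace] " << category << ' ' << stack_trace << '\n';
        });
    fe->add_extension(telemetry);

    // Loading a model through a decoder and calling fe->convert(input_model)
    // would now print the op statistics gathered by TranslateSession.
    return 0;
}

The two send_event calls expected by test_pytorch_telemetry correspond to the op_count statistics flushed after converting the scripted aten_relu model; no error_cause events fire because every op converts successfully.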