From 4882ccde035c8f607a8ab7c56de0f29982d9f253 Mon Sep 17 00:00:00 2001
From: Maxim Vafin <maxim.vafin@intel.com>
Date: Tue, 22 Aug 2023 17:18:23 +0200
Subject: [PATCH] [PT FE] Fix issue when FakeQuantize is not inserted after regular operations (#19314)

---
 src/frontends/pytorch/src/utils_quantize.hpp | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/frontends/pytorch/src/utils_quantize.hpp b/src/frontends/pytorch/src/utils_quantize.hpp
index fe40a52ebbf..69917e7b8bc 100644
--- a/src/frontends/pytorch/src/utils_quantize.hpp
+++ b/src/frontends/pytorch/src/utils_quantize.hpp
@@ -154,12 +154,13 @@ template <OutputVector (*T)(const NodeContext&), size_t in_idx = 0, size_t out_idx = 0>
 OutputVector quantizable_op(const NodeContext& context) {
     auto translation_res = T(context);
     FRONT_END_OP_CONVERSION_CHECK(translation_res.size() > out_idx, "Not enough outputs to apply quantization.");
-    if (const auto quantized_pt_node = cast_quantized_fw_node(context.get_input(in_idx).get_node_shared_ptr())) {
-        return {context.mark_node(std::make_shared<QuantizedPtNode>(quantized_pt_node->get_type(),
-                                                                    translation_res[out_idx],
-                                                                    quantized_pt_node->get_scale(),
-                                                                    quantized_pt_node->get_zero_point(),
-                                                                    quantized_pt_node->get_dtype()))};
+    auto target_input = context.get_input(in_idx);
+    if (const auto quantized_pt_node = cast_quantized_fw_node(target_input.get_node_shared_ptr())) {
+        return {quantize(context,
+                         translation_res[out_idx],
+                         quantized_pt_node->get_scale(),
+                         quantized_pt_node->get_zero_point(),
+                         target_input)};
     }
     return translation_res;
 }
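
Note: the fragment below is an editor's illustration, not part of the patch. Before the fix, quantizable_op wrapped the translated result back into a QuantizedPtNode placeholder, which only carries the scale/zero-point metadata; the fix instead calls the quantize() helper (its argument list is visible in the + lines above), which materializes an actual FakeQuantize after the regular operation. The standalone C++ sketch below shows roughly what such a helper boils down to, assuming unsigned 8-bit quantization (levels = 256); the name insert_fake_quantize and the fixed-dtype simplification are illustrative assumptions, not the real utils_quantize.cpp implementation, which also dispatches on the stored quantized dtype.

#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/fake_quantize.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/subtract.hpp"

using namespace ov;

// Hypothetical helper: re-insert a FakeQuantize after `node`, reusing the
// scale and zero-point recovered from the quantized input it consumed.
// Simplified to unsigned 8-bit quantization (256 levels) for brevity.
Output<Node> insert_fake_quantize(const Output<Node>& node,
                                  const Output<Node>& scale,
                                  const Output<Node>& zero_point) {
    const size_t levels = 256;  // quint8: 2^8 quantization levels
    // Dequantized bounds: (q - zero_point) * scale for q in {0, 255}.
    auto zp_f32 = std::make_shared<op::v0::Convert>(zero_point, element::f32);
    auto low_q = op::v0::Constant::create(element::f32, Shape{}, {0.f});
    auto high_q = op::v0::Constant::create(element::f32, Shape{}, {255.f});
    auto low = std::make_shared<op::v1::Multiply>(
        std::make_shared<op::v1::Subtract>(low_q, zp_f32), scale);
    auto high = std::make_shared<op::v1::Multiply>(
        std::make_shared<op::v1::Subtract>(high_q, zp_f32), scale);
    // Identical input and output bounds model a quantize-dequantize round
    // trip, which is how quantization is represented in the OV graph.
    return std::make_shared<op::v0::FakeQuantize>(node, low, high, low, high, levels);
}

With a helper of this shape, every translated op whose input was quantized ends up followed by a real FakeQuantize node, rather than a placeholder that could silently drop the requantization step.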