[LPT] Move FQ disabled for non-per-tensor FQs (#7581)
parent be8600af38
commit 1aa6db4aaf
@@ -39,12 +39,26 @@ MoveFakeQuantize::MoveFakeQuantize(const Params& params) : LayerTransformation(p
         output_low,
         output_high });
 
-    ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) {
+    ngraph::graph_rewrite_callback callback = [=](pattern::Matcher& m) {
         auto op = m.get_match_root();
         if (transformation_callback(op)) {
             return false;
         }
 
+        // workaround: only per-tensor quantization is allowed
+        const auto& pattern_map = m.get_pattern_value_map();
+        const auto is_scalar = [&](const std::shared_ptr<ngraph::Node>& wrapped_constant) {
+            return NetworkHelper::isScalarLike(
+                as_type_ptr<opset1::Constant>(pattern_map.at(wrapped_constant).get_node_shared_ptr()));
+        };
+
+        if (!is_scalar(input_low) ||
+            !is_scalar(input_high) ||
+            !is_scalar(output_low) ||
+            !is_scalar(output_high)) {
+            return false;
+        }
+
         return transform(*context, m);
     };
 
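For context on the new check: the lambda capture changes from [this] to [=] because the callback body now reads the pattern nodes (input_low, input_high, output_low, output_high) declared earlier in the constructor, so copies of those shared pointers must be captured along with this. NetworkHelper::isScalarLike, as used here, answers whether an interval constant is effectively a single value, which is what per-tensor FakeQuantize intervals look like; per-channel intervals hold one value per channel. The standalone C++ sketch below illustrates that predicate under this assumption; is_scalar_like and the flat-buffer constant are hypothetical stand-ins for illustration, not the LPT implementation.

#include <iostream>
#include <vector>

// Hypothetical stand-in for an ngraph constant's data: a flat value buffer.
// A per-tensor FQ interval is scalar-like (a single element, or all elements
// identical); a per-channel interval carries a distinct value per channel.
bool is_scalar_like(const std::vector<float>& values) {
    if (values.empty()) return false;
    for (float v : values) {
        if (v != values.front()) return false;  // any mismatch => not per-tensor
    }
    return true;  // all elements equal => behaves like a scalar
}

int main() {
    std::vector<float> per_tensor{0.5f, 0.5f, 0.5f};    // broadcast of one value
    std::vector<float> per_channel{0.5f, 0.25f, 1.0f};  // one interval per channel
    std::cout << std::boolalpha
              << is_scalar_like(per_tensor) << '\n'     // true: FQ may be moved
              << is_scalar_like(per_channel) << '\n';   // false: callback bails out
    return 0;
}

With this guard in place, the callback returns false for any FakeQuantize whose interval constants are not scalar-like, so MoveFakeQuantize only fires for per-tensor quantization.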