* Removed back phase transformations related to IRv7 * Fixed setting value for the input port using the 'set_value' method * Removed front and middle phase transformations related to IRv7 * Cleaned up the remaining IRv7-specific transformations in the Model Optimizer * Final cleanup of the deprecated IR v7 related code * Removed 'blobs_as_input' usage in the Model Optimizer * Removed function '_fuse_add' from the Model Optimizer since it is no longer used * Removed 'keep_in_IR' node attribute for FakeQuantize ops in the MO * Disabled failing gpu_engine.user_context test
39 lines
1.3 KiB
Python
"""
|
|
Copyright (C) 2018-2020 Intel Corporation
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
"""
|
|
from extensions.front.tf.FakeQuantWithMinMaxVars import FakeQuantWithMinMaxVarsToQuantize
|
|
from mo.front.common.replacement import FrontReplacementPattern
|
|
from mo.graph.graph import Graph
|
|
|
|
|
|
class DisableQuantizeValuePropagation(FrontReplacementPattern):
    """Front-phase transformation that sets the 'stop_value_propagation'
    attribute on FakeQuantize nodes whose 'levels' attribute is not 2.

    NOTE(review): the attribute name suggests this prevents values from being
    propagated (constant-folded) through such nodes downstream — confirm
    against the consumer of 'stop_value_propagation'.
    """
    enabled = True

    def run_after(self):
        # Must run after TF FakeQuantWithMinMaxVars ops have been converted
        # into FakeQuantize, so the pattern below can match them.
        return [FakeQuantWithMinMaxVarsToQuantize]

    @staticmethod
    def pattern():
        # Single-node pattern: any FakeQuantize with levels != 2.
        quantize_attrs = dict(op='FakeQuantize', levels=lambda levels: levels != 2)
        return dict(
            nodes=[('quantize', quantize_attrs)],
            edges=[],
        )

    @staticmethod
    def replace_pattern(graph: Graph, match: dict):
        # Flag the matched node in place; no graph restructuring is done here.
        match['quantize']['stop_value_propagation'] = True