Fixed clearing of pipeline config params and TF session in convert_model() (#16191)

* Fixed pipeline config params clearing.

* Added clearing of TF session. Added tests.
Anastasiia Pnevskaia 2023-03-13 17:03:02 +01:00 committed by GitHub
parent ca6ad433e4
commit 9462b3ea16
6 changed files with 106 additions and 2 deletions
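For context, the failure mode behind this commit shows up when more than one TensorFlow model is converted in a single Python process: state left behind by the first conversion (pipeline.config parameters and the TF default graph/session) leaked into the second. A hedged usage sketch of that scenario, assuming convert_model() forwards the MO --input_meta_graph option as a keyword argument and using placeholder paths:

from openvino.tools.mo import convert_model

# Before this fix, the second call could reuse pipeline.config parameters and
# TF graph state cached by the first one; the file names here are placeholders.
ov_model_a = convert_model(input_meta_graph="model_a.meta")
ov_model_b = convert_model(input_meta_graph="model_b.meta")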

@@ -324,6 +324,9 @@ def load_tf_graph_def(graph_file_name: str = "", is_binary: bool = True, checkpo
 def convert_to_pb(argv: argparse.Namespace):
     from openvino.tools.mo.utils.cli_parser import get_model_name
+    env_setup = get_environment_setup("tf")
+    if "tensorflow" in env_setup and env_setup["tensorflow"] >= LooseVersion("2.0.0"):
+        tf.keras.backend.clear_session()
     # if this is already binary frozen format .pb, there is no need to create auxiliary binary frozen protobuf
     # the main thing is to differentiate this format from text frozen format and checkpoint
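The new clear_session() call is needed because the converter works with the TF1 default graph, which persists for the lifetime of the process; without a reset, a second conversion inherits the nodes and variables of the first. A minimal sketch of that collision (not part of the diff; it assumes clear_session() resets the default graph, which is why the fix uses it):

import tensorflow as tf
import tensorflow.compat.v1 as tf_v1
from tensorflow.python.eager.context import graph_mode

with graph_mode():
    tf_v1.get_variable("A", initializer=tf_v1.constant(3, shape=[2]))

# Resets Keras state and the default graph, so the name "A" is free again.
# Without this call the get_variable("A", ...) below fails with
# "ValueError: Variable A already exists".
tf.keras.backend.clear_session()

with graph_mode():
    tf_v1.get_variable("A", initializer=tf_v1.constant(3, shape=[2]))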

@@ -75,10 +75,10 @@ class PipelineConfig:
     The class that parses pipeline.config files used to generate TF models generated using Object Detection API.
     The class stores data read from the file in a plain dictionary for easier access using the get_param function.
     """
-    _raw_data_dict = dict()
-    _model_params = dict()
 
     def __init__(self, file_name: str):
+        self._raw_data_dict = dict()
+        self._model_params = dict()
         self._raw_data_dict = SimpleProtoParser().parse_file(file_name)
         if not self._raw_data_dict:
             raise Error('Failed to parse pipeline.config file {}'.format(file_name))
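This hunk fixes the classic mutable-class-attribute pitfall: a dict() defined at class level is shared by every instance, so parameters accumulated while parsing one pipeline.config survive into the next PipelineConfig object created in the same process. A minimal sketch with hypothetical class names:

class Leaky:
    _model_params = dict()               # one dict shared by all instances

    def __init__(self, name):
        self._model_params[name] = True  # mutates the shared class-level dict


class Fixed:
    def __init__(self, name):
        self._model_params = dict()      # fresh dict per instance
        self._model_params[name] = True


Leaky("first")
print(Leaky("second")._model_params)     # {'first': True, 'second': True} -- stale state
print(Fixed("second")._model_params)     # {'second': True}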

@@ -4,6 +4,7 @@
 import unittest
 from argparse import Namespace
 from unittest.mock import patch
+import os
 
 from generator import generator, generate
@@ -331,3 +332,19 @@ class TestObjectDetectionAPIPreprocessor2Replacement(unittest.TestCase):
         (flag, resp) = compare_graphs(graph, self.build_ref_graph(False), 'result', check_op_attrs=True)
         self.assertTrue(flag, resp)
+
+
+class TestPipelineConfig(unittest.TestCase):
+    def test_pipeline_config_loading(self):
+        from openvino.tools.mo.utils.pipeline_config import PipelineConfig
+        pipeline_config = PipelineConfig(os.path.join(os.path.dirname(__file__), "test_configs/config1.config"))
+        assert pipeline_config.get_param('ssd_anchor_generator_num_layers') == 6
+        assert pipeline_config.get_param('num_classes') == 90
+        assert pipeline_config.get_param('resizer_image_width') == 300
+        assert pipeline_config.get_param('resizer_image_height') == 300
+
+        pipeline_config = PipelineConfig(os.path.join(os.path.dirname(__file__), "test_configs/config2.config"))
+        assert pipeline_config.get_param('ssd_anchor_generator_num_layers') is None
+        assert pipeline_config.get_param('num_classes') == 10
+        assert pipeline_config.get_param('resizer_image_width') == 640
+        assert pipeline_config.get_param('resizer_image_height') == 640

@@ -108,3 +108,59 @@ class ConvertToPBTests(unittest.TestCase):
                         "The test model in frozen binary format must exist")
         # test convert_to_pb - expect no auxiliary model created
         self.assertIsNone(convert_to_pb(self.argv))
+
+    def test_meta_format_session_clearing(self):
+        try:
+            import tensorflow.compat.v1 as tf_v1
+        except ImportError:
+            import tensorflow as tf_v1
+        from openvino.tools.mo.utils.versions_checker import get_environment_setup
+        from distutils.version import LooseVersion
+
+        env_setup = get_environment_setup("tf")
+        use_tf2 = False
+        if "tensorflow" in env_setup and env_setup["tensorflow"] >= LooseVersion("2.0.0"):
+            use_tf2 = True
+
+        from tensorflow.python.eager.context import graph_mode
+        with tempfile.TemporaryDirectory(dir=self.test_directory) as tmp_dir:
+            with graph_mode():
+                a = tf_v1.get_variable("A", initializer=tf_v1.constant(3, shape=[2]))
+                b = tf_v1.get_variable("B", initializer=tf_v1.constant(5, shape=[2]))
+                tf_v1.add(a, b, name='Add')
+                init_op = tf_v1.global_variables_initializer()
+                saver = tf_v1.train.Saver()
+                with tf_v1.Session() as sess:
+                    sess.run(init_op)
+                    saver.save(sess, os.path.join(tmp_dir, 'model1'))
+            if use_tf2:
+                import tensorflow as tf
+                tf.keras.backend.clear_session()
+
+            with graph_mode():
+                c = tf_v1.get_variable("C", initializer=tf_v1.constant(3, shape=[2]))
+                d = tf_v1.get_variable("D", initializer=tf_v1.constant(5, shape=[2]))
+                tf_v1.add(c, d, name='Add1')
+                init_op = tf_v1.global_variables_initializer()
+                saver = tf_v1.train.Saver()
+                with tf_v1.Session() as sess:
+                    sess.run(init_op)
+                    saver.save(sess, os.path.join(tmp_dir, 'model2'))
+            if use_tf2:
+                import tensorflow as tf
+                tf.keras.backend.clear_session()
+
+            self.argv.input_meta_graph = os.path.join(tmp_dir, 'model1.meta')
+            self.argv.output_dir = tmp_dir
+            path_to_pb = convert_to_pb(self.argv)
+            self.assertTrue(os.path.exists(path_to_pb), "The auxiliary .pb is not generated")
+            self.assertTrue(os.path.getsize(path_to_pb) != 0, "The auxiliary .pb is empty")
+
+            self.argv.input_meta_graph = os.path.join(tmp_dir, 'model2.meta')
+            self.argv.output_dir = tmp_dir
+            self.argv.input_model = None
+            path_to_pb = convert_to_pb(self.argv)
+            self.assertTrue(os.path.exists(path_to_pb), "The auxiliary .pb is not generated")
+            self.assertTrue(os.path.getsize(path_to_pb) != 0, "The auxiliary .pb is empty")
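For reference, converting the .meta checkpoints created by this test into an auxiliary frozen .pb boils down to importing the meta graph into the default graph, restoring the weights, and folding variables into constants, which is exactly the default-graph state that clear_session() wipes between the two conversions. A rough sketch of that step, not the MO implementation itself (the function name is hypothetical, and it must run in graph mode, e.g. inside graph_mode() as above):

import os
import tensorflow.compat.v1 as tf_v1

def freeze_meta_checkpoint(meta_path, output_node_names):
    # import_meta_graph() adds the saved nodes to the *default* graph.
    saver = tf_v1.train.import_meta_graph(meta_path)
    checkpoint_prefix = os.path.splitext(meta_path)[0]   # '.../model1.meta' -> '.../model1'
    with tf_v1.Session() as sess:
        saver.restore(sess, checkpoint_prefix)
        # Replace every variable with a constant holding its restored value.
        return tf_v1.graph_util.convert_variables_to_constants(
            sess, tf_v1.get_default_graph().as_graph_def(), output_node_names)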

@@ -0,0 +1,17 @@
+model {
+  ssd {
+    num_classes: 90
+    image_resizer {
+      fixed_shape_resizer {
+        height: 300
+        width: 300
+      }
+    }
+    anchor_generator {
+      ssd_anchor_generator {
+        num_layers: 6
+      }
+    }
+  }
+}
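Read together with the new TestPipelineConfig test above, config1.config is expected to flatten into the following get_param() keys; the values are taken directly from the test's assertions, not from pipeline_config.py:

expected_config1_params = {
    "num_classes": 90,                     # ssd { num_classes: 90 }
    "resizer_image_height": 300,           # fixed_shape_resizer { height: 300 }
    "resizer_image_width": 300,            # fixed_shape_resizer { width: 300 }
    "ssd_anchor_generator_num_layers": 6,  # ssd_anchor_generator { num_layers: 6 }
}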

@@ -0,0 +1,11 @@
+model {
+  ssd {
+    num_classes: 10
+    image_resizer {
+      fixed_shape_resizer {
+        height: 640
+        width: 640
+      }
+    }
+  }
+}
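And the corresponding expectations for config2.config, again taken from the test: this file has no anchor_generator section, so the parameter must come back as None rather than the 6 parsed from config1, which is exactly the leak the instance-level dictionaries prevent.

expected_config2_params = {
    "num_classes": 10,
    "resizer_image_height": 640,
    "resizer_image_width": 640,
    "ssd_anchor_generator_num_layers": None,  # section absent, no stale value from config1
}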