diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index c18d79d0cb2..2fda9d9b506 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -507,6 +507,13 @@ jobs: $(RUN_PREFIX) python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow2_keras_tests/test_tf2_keras_activation.py --ir_version=11 --junitxml=./TEST-tf2_Activation.xmlTEST -k "sigmoid" displayName: 'TensorFlow 2 Layer Tests - Legacy FE' + - script: | + python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt + export PYTHONPATH=$(REPO_DIR)/tools/mo/:$(LAYER_TESTS_DIR):$PYTHONPATH + export TEST_DEVICE=CPU + $(RUN_PREFIX) python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_lite_tests/ --junitxml=$(INSTALL_TEST_DIR)/TEST-tfl_fe.xmlTEST + displayName: 'TensorFlow Lite Layer Tests - TFL FE' + - script: | python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt export PYTHONPATH=$(LAYER_TESTS_DIR):$PYTHONPATH diff --git a/.ci/azure/linux_debian.yml b/.ci/azure/linux_debian.yml index 1a4bceafd72..2c1176e3452 100644 --- a/.ci/azure/linux_debian.yml +++ b/.ci/azure/linux_debian.yml @@ -393,6 +393,13 @@ jobs: python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=$(INSTALL_TEST_DIR)/TEST-tf_Roll.xmlTEST displayName: 'TensorFlow 1 Layer Tests - Legacy FE' + - script: | + python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt + export PYTHONPATH=$(REPO_DIR)/tools/mo/:$(LAYER_TESTS_DIR):$PYTHONPATH + export TEST_DEVICE=CPU + $(RUN_PREFIX) python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_lite_tests/ --junitxml=$(INSTALL_TEST_DIR)/TEST-tfl_fe.xmlTEST + displayName: 'TensorFlow Lite Layer Tests - TFL FE' + - task: PublishTestResults@2 condition: always() inputs: diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp index 0ce55d90f9d..7b49dc4f5b0 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp @@ -48,11 +48,17 @@ 
std::string DecoderFlatBuffer::get_input_tensor_name(size_t idx) const { FRONT_END_GENERAL_CHECK(idx < get_input_size(), "Requested input is out-of-range"); return m_input_info.at(idx).tensor->name()->str(); } + std::string DecoderFlatBuffer::get_output_tensor_name(size_t idx) const { FRONT_END_GENERAL_CHECK(idx < get_output_size(), "Requested output is out-of-range"); return m_output_info.at(idx).tensor->name()->str(); } +ov::element::Type DecoderFlatBuffer::get_output_tensor_type(size_t idx) const { + FRONT_END_GENERAL_CHECK(idx < get_output_size(), "Requested output is out-of-range"); + return get_ov_type(m_output_info.at(idx).tensor->type()); +} + std::shared_ptr DecoderFlatBuffer::decode_input_tensor( size_t idx, const InputModel& model) const { diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h index 251af2c2952..a6751a4703b 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h @@ -46,6 +46,7 @@ public: std::string& producer_name, size_t& producer_output_port_index) const override; std::string get_output_tensor_name(size_t idx) const; + element::Type get_output_tensor_type(size_t idx) const; std::string get_input_tensor_name(size_t idx) const; const std::string& get_op_type() const override; diff --git a/src/frontends/tensorflow_lite/src/op/cast.cpp b/src/frontends/tensorflow_lite/src/op/cast.cpp index 126bdec697b..283495d82c3 100644 --- a/src/frontends/tensorflow_lite/src/op/cast.cpp +++ b/src/frontends/tensorflow_lite/src/op/cast.cpp @@ -16,7 +16,7 @@ namespace op { OutputVector cast(const ov::frontend::tensorflow_lite::NodeContext& node) { const auto& decoder = get_decoder(node); std::map attrs{ - {"DstT", get_ov_type(decoder->get_attribute(&tflite::CastOptions::out_data_type))}, + {"DstT", decoder->get_output_tensor_type(0)}, }; return attribute_helper(node, attrs, 
ov::frontend::tensorflow::op::translate_cast_op); } diff --git a/tests/layer_tests/common/layer_test_class.py b/tests/layer_tests/common/layer_test_class.py index ba3583417f8..a43c2e74229 100644 --- a/tests/layer_tests/common/layer_test_class.py +++ b/tests/layer_tests/common/layer_test_class.py @@ -34,7 +34,6 @@ class CommonLayerTest: Example: "transform_1,transform_2" """ model_path = self.produce_model_path(framework_model=framework_model, save_path=temp_dir) - self.use_new_frontend = use_new_frontend self.use_old_api = use_old_api # TODO Pass environment variables via subprocess environment diff --git a/tests/layer_tests/common/tflite_layer_test_class.py b/tests/layer_tests/common/tflite_layer_test_class.py new file mode 100644 index 00000000000..fc459b5028f --- /dev/null +++ b/tests/layer_tests/common/tflite_layer_test_class.py @@ -0,0 +1,65 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +import os +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +import tensorflow as tf +from tensorflow.lite.tools import flatbuffer_utils as utils +from common.layer_test_class import CommonLayerTest +from common.utils.tflite_utils import get_tflite_results, get_tensors_from_graph + +class TFLiteLayerTest(CommonLayerTest): + model_path = None + inputs = None + outputs = None + allowed_ops = None + + def make_model(self, params): + raise RuntimeError("This is TensorFlow Lite base layer test class, " + "please implement make_model function for the specific test") + + def produce_tflite_model(self, framework_model, save_path): + with tf.Graph().as_default() as g: + tf.graph_util.import_graph_def(framework_model, name="") + input_tensors = get_tensors_from_graph(g, self.inputs) + output_tensors = get_tensors_from_graph(g, self.outputs) + + tflite_model = tf.compat.v1.lite.TFLiteConverter(framework_model, + input_tensors=input_tensors, + output_tensors=output_tensors).convert() + + tflite_model_path = os.path.join(os.path.dirname(save_path), 
'model.tflite') + with tf.io.gfile.GFile(tflite_model_path, 'wb') as f: + f.write(tflite_model) + return tflite_model_path + + def produce_model_path(self, framework_model, save_path): + assert self.model_path, "Empty model path" + return self.model_path + + def get_framework_results(self, inputs_dict, model_path): + return get_tflite_results(self.use_new_frontend, self.use_old_api, inputs_dict, model_path) + + def check_tflite_model_has_only_allowed_ops(self): + if self.allowed_ops is None: + return + BO = utils.schema_fb.BuiltinOperator + builtin_operators = {getattr(BO, name): name for name in dir(BO) if not name.startswith("_")} + model = utils.read_model(self.model_path) + + op_names = [] + for op in model.operatorCodes: + assert op.customCode is None, "Encountered custom operation in the model" + deprecated_code = op.deprecatedBuiltinCode + deprecated_vs_normal = utils.schema_fb.BuiltinOperator.PLACEHOLDER_FOR_GREATER_OP_CODES + if deprecated_code < deprecated_vs_normal: + op_names.append(builtin_operators[op.deprecatedBuiltinCode]) + else: + op_names.append(builtin_operators[op.builtinCode]) + op_names = sorted(op_names) + assert op_names == self.allowed_ops, "TFLite model is not as you expect it to be: " + ", ".join(op_names) + + def _test(self, ie_device, precision, temp_dir, params): + model = self.make_model(params) + self.model_path = self.produce_tflite_model(model, temp_dir) + self.check_tflite_model_has_only_allowed_ops() + super()._test(model, None, ie_device, precision, None, temp_dir, False, True, **params) diff --git a/tests/layer_tests/common/utils/tflite_utils.py b/tests/layer_tests/common/utils/tflite_utils.py index 2c10d8437b1..3c700c54bec 100644 --- a/tests/layer_tests/common/utils/tflite_utils.py +++ b/tests/layer_tests/common/utils/tflite_utils.py @@ -58,3 +58,12 @@ def save_tf2_saved_model_to_tflite(savedmodel): return tflite_model_path + +def get_tensors_from_graph(graph, ops: list): + tensors = [] + for input_op in ops: + 
       input_op_tensors = graph.get_operation_by_name(input_op).outputs
+        for op_out_tensor in input_op_tensors:
+            tensors.append(op_out_tensor)
+
+    return tensors
diff --git a/tests/layer_tests/tensorflow_lite_tests/conftest.py b/tests/layer_tests/tensorflow_lite_tests/conftest.py
new file mode 100644
index 00000000000..6d1ec3182a9
--- /dev/null
+++ b/tests/layer_tests/tensorflow_lite_tests/conftest.py
@@ -0,0 +1,12 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import inspect
+
+from common.layer_test_class import get_params
+
+
+def pytest_generate_tests(metafunc):
+    test_gen_attrs_names = list(inspect.signature(get_params).parameters)
+    params = get_params()
+    metafunc.parametrize(test_gen_attrs_names, params, scope="function")
diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_Unary.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_Unary.py
new file mode 100644
index 00000000000..0c7e9f849e1
--- /dev/null
+++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_Unary.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+import itertools
+from functools import partial
+
+import numpy as np
+import pytest
+import tensorflow as tf
+from common.tflite_layer_test_class import TFLiteLayerTest
+
+np.random.seed(42)
+
+
+def make_positive_array(inputs_dict):
+    for input in inputs_dict.keys():
+        inputs_dict[input] = np.random.randint(1, 10, inputs_dict[input]).astype(np.float32)
+    return inputs_dict
+
+
+def make_boolean_array(inputs_dict):
+    for input in inputs_dict.keys():
+        inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(bool)
+    return inputs_dict
+
+
+data_generators = {
+    'positive': make_positive_array,
+    'boolean': make_boolean_array,
+}
+
+test_ops = [
+    {'op_name': 'ABS', 'op_func': tf.math.abs},
+    {'op_name': 'CAST', 'op_func': partial(tf.cast, dtype=tf.int32)},
+    {'op_name': 'CEIL', 'op_func': tf.math.ceil},
+    {'op_name': 'COS', 'op_func': 
tf.math.cos}, + {'op_name': 'ELU', 'op_func': tf.nn.elu}, + {'op_name': 'EXP', 'op_func': tf.math.exp, 'kwargs_to_prepare_input': 'positive'}, + {'op_name': 'FLOOR', 'op_func': tf.math.floor}, + {'op_name': 'LEAKY_RELU', 'op_func': partial(tf.nn.leaky_relu, alpha=-0.5)}, + {'op_name': 'LOG', 'op_func': tf.math.log, 'kwargs_to_prepare_input': 'positive'}, + {'op_name': 'LOG_SOFTMAX', 'op_func': partial(tf.nn.log_softmax, axis=-1)}, + {'op_name': 'LOGICAL_NOT', 'op_func': tf.math.logical_not, 'kwargs_to_prepare_input': 'boolean', 'dtype': tf.bool}, + {'op_name': 'LOGISTIC', 'op_func': tf.math.sigmoid}, + {'op_name': 'NEG', 'op_func': tf.math.negative}, + {'op_name': 'RELU6', 'op_func': tf.nn.relu6}, + {'op_name': 'ROUND', 'op_func': tf.math.round}, + {'op_name': 'RSQRT', 'op_func': tf.math.rsqrt, 'kwargs_to_prepare_input': 'positive'}, + {'op_name': 'SIN', 'op_func': tf.math.sin}, + {'op_name': 'SOFTMAX', 'op_func': partial(tf.nn.softmax, axis=-1)}, # additionally test with alpha + {'op_name': 'SQRT', 'op_func': tf.math.sqrt, 'kwargs_to_prepare_input': 'positive'}, + {'op_name': 'SQUARE', 'op_func': tf.math.square}, + {'op_name': 'TANH', 'op_func': tf.math.tanh}, + + # These operations are getting optimized out by tflite aka empty tfl model + # {'op_name': 'RANK', 'op_func': tf.rank}, + # {'op_name': 'SHAPE', 'op_func': partial(tf.shape, out_type=tf.int32)}, + + # This op could not be converted standalone -- tries to become FlexOp (offload from tfl to tf) + # {'op_name': 'SIGN', 'op_func': tf.math.sign}, + + # TF has no such standalone operation + # {'op_name': 'HARD_SWISH'} +] + +test_params = [ + {'shape': [2, 10, 10, 3]}, + {'shape': [2, 10]} +] + +test_data = list(itertools.product(test_ops, test_params)) +for i, (parameters, shapes) in enumerate(test_data): + parameters.update(shapes) + test_data[i] = parameters.copy() + + +class TestTFLiteUnaryLayerTest(TFLiteLayerTest): + inputs = ["Input"] + outputs = ["UnaryOperation"] + + def _prepare_input(self, 
inputs_dict, generator=None): + if generator is None: + return super()._prepare_input(inputs_dict) + return data_generators[generator](inputs_dict) + + + def make_model(self, params): + assert len(set(params.keys()).intersection({'op_name', 'op_func', 'shape'})) == 3, \ + 'Unexpected parameters for test: ' + ','.join(params.keys()) + self.allowed_ops = [params['op_name']] + tf.compat.v1.reset_default_graph() + with tf.compat.v1.Session() as sess: + place_holder = tf.compat.v1.placeholder(params.get('dtype', tf.float32), params['shape'], name=TestTFLiteUnaryLayerTest.inputs[0]) + params['op_func'](place_holder, name=TestTFLiteUnaryLayerTest.outputs[0]) + net = sess.graph_def + return net + + @pytest.mark.parametrize("params", test_data) + @pytest.mark.nightly + def test_unary(self, params, ie_device, precision, temp_dir): + self._test(ie_device, precision, temp_dir, params)