[TF Hub][GitHub Actions][TF FE] Introduce TF Hub Models Validation in GitHub Actions (#19368)

Roman Kazantsev 2023-08-23 22:40:31 +04:00 committed by GitHub
parent ab900606cd
commit 1d0d00bf22
10 changed files with 243 additions and 0 deletions

.github/CODEOWNERS

@@ -93,6 +93,7 @@
/tests/layer_tests/pytorch_tests/ @openvinotoolkit/openvino-pytorch-frontend-maintainers
/tests/layer_tests/tensorflow_tests @openvinotoolkit/openvino-tf-frontend-maintainers
/tests/layer_tests/jax_tests @openvinotoolkit/openvino-tf-frontend-maintainers
/tests/model_hub_tests @openvinotoolkit/openvino-tf-frontend-maintainers
# Tools:
/tools/ @openvinotoolkit/openvino-tools-maintainers

.github/workflows/linux.yml

@@ -41,6 +41,7 @@ jobs:
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
SAMPLES_INSTALL_DIR: ${{ github.workspace }}/install/samples
LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests
MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
BUILD_DIR: ${{ github.workspace }}/build
DATA_PATH: ${{ github.workspace }}/testdata
MODELS_PATH: ${{ github.workspace }}/testdata
@@ -183,15 +184,24 @@ jobs:
- name: Cmake Layer Tests
  run: cmake -GNinja -S ${{ env.OPENVINO_REPO }}/tests/layer_tests -B ${{ env.BUILD_DIR }}/layer_tests

- name: Cmake Model Hub Tests
  run: cmake -GNinja -S ${{ env.OPENVINO_REPO }}/tests/model_hub_tests -B ${{ env.BUILD_DIR }}/model_hub_tests

- name: Build Layer Tests
  run: cmake --build ${{ env.BUILD_DIR }}/layer_tests --parallel --config Release

- name: Build Model Hub Tests
  run: cmake --build ${{ env.BUILD_DIR }}/model_hub_tests --parallel --config Release

- name: Install wheel packages
  run: cmake -DCOMPONENT=python_wheels -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake

- name: Install Layer Tests
  run: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/layer_tests/cmake_install.cmake

- name: Install Model Hub Tests
  run: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/model_hub_tests/cmake_install.cmake

- name: Install python wheels
  run: python3 -m pip install openvino-dev --find-links=${{ env.INSTALL_DIR }}/tools
@@ -482,6 +492,7 @@ jobs:
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
SAMPLES_INSTALL_DIR: ${{ github.workspace }}/install/samples
LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests
MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
BUILD_DIR: ${{ github.workspace }}/build
DATA_PATH: ${{ github.workspace }}/testdata
MODELS_PATH: ${{ github.workspace }}/testdata
@@ -655,6 +666,18 @@ jobs:
  env:
    TEST_DEVICE: CPU

- name: TensorFlow Hub Tests - TF FE
  run: |
    python3 -m pip install -r ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/tf_hub_tests/requirements.txt
    export PYTHONPATH=${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}:$PYTHONPATH
    source ${{ env.INSTALL_DIR }}/setupvars.sh
    python3 -m pytest ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/tf_hub_tests/ -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_hub_tf_fe.xml
  env:
    TEST_DEVICE: CPU

- name: TensorFlow 1 Layer Tests - Legacy FE
  run: |
    python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
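
To reproduce the TF Hub step outside CI, roughly the same flow can be driven through pytest's Python API (the relative paths below and the programmatic invocation are illustrative assumptions, not part of the workflow):

import os
import sys

import pytest

# make models_hub_common importable, as PYTHONPATH does in the workflow step
sys.path.insert(0, "tests/model_hub_tests")
# restrict the device list read by models_hub_common.constants
os.environ["TEST_DEVICE"] = "CPU"

# counterpart of the `python3 -m pytest ... -m precommit` command above
pytest.main(["tests/model_hub_tests/tf_hub_tests", "-m", "precommit"])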

tests/model_hub_tests/CMakeLists.txt

@@ -0,0 +1,9 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required(VERSION 3.13)

project(model_hub_tests)

install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)

tests/model_hub_tests/models_hub_common/constants.py

@@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os

# supported devices: CPU, GPU, GNA
test_device = os.environ.get('TEST_DEVICE', 'CPU;GPU').split(';')

tests/model_hub_tests/models_hub_common/test_convert_model.py

@@ -0,0 +1,86 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
from openvino.runtime import Core
from openvino.tools.mo import convert_model


class TestConvertModel:
    def load_model(self, model_name, model_link):
        raise NotImplementedError("load_model is not implemented")

    def get_inputs_info(self, model_obj):
        raise NotImplementedError("get_inputs_info is not implemented")

    def prepare_input(self, input_shape, input_type):
        if input_type in [np.float32, np.float64]:
            return np.random.randint(-2, 2, size=input_shape).astype(input_type)
        elif input_type in [np.int8, np.int16, np.int32, np.int64]:
            return np.random.randint(-5, 5, size=input_shape).astype(input_type)
        elif input_type in [np.uint8, np.uint16]:
            return np.random.randint(0, 5, size=input_shape).astype(input_type)
        elif input_type in [str]:
            return np.broadcast_to("Some string", input_shape)
        elif input_type in [bool]:
            return np.random.randint(0, 2, size=input_shape).astype(input_type)
        else:
            assert False, "Unsupported type {}".format(input_type)

    def prepare_inputs(self, inputs_info):
        inputs = []
        for input_shape, input_type in inputs_info:
            inputs.append(self.prepare_input(input_shape, input_type))
        return inputs

    def convert_model(self, model_obj):
        ov_model = convert_model(model_obj)
        return ov_model

    def infer_fw_model(self, model_obj, inputs):
        raise NotImplementedError("infer_fw_model is not implemented")

    def infer_ov_model(self, ov_model, inputs, ie_device):
        core = Core()
        compiled = core.compile_model(ov_model, ie_device)
        ov_outputs = compiled(inputs)
        return ov_outputs

    def compare_results(self, fw_outputs, ov_outputs):
        assert len(fw_outputs) == len(ov_outputs), \
            "Different number of outputs between TensorFlow and OpenVINO:" \
            " {} vs. {}".format(len(fw_outputs), len(ov_outputs))

        fw_eps = 5e-2
        is_ok = True
        for out_name in fw_outputs.keys():
            cur_fw_res = fw_outputs[out_name]
            assert out_name in ov_outputs, \
                "OpenVINO outputs do not contain tensor with name {}".format(out_name)
            cur_ov_res = ov_outputs[out_name]
            print(f"fw_res: {cur_fw_res};\n ov_res: {cur_ov_res}")
            if not np.allclose(cur_ov_res, cur_fw_res,
                               atol=fw_eps,
                               rtol=fw_eps, equal_nan=True):
                is_ok = False
                print("Max diff is {}".format(np.array(abs(cur_ov_res - cur_fw_res)).max()))
            else:
                print("Accuracy validation successful!\n")
                print("absolute eps: {}, relative eps: {}".format(fw_eps, fw_eps))
        assert is_ok, "Accuracy validation failed"

    def run(self, model_name, model_link, ie_device):
        print("Load the model {} (url: {})".format(model_name, model_link))
        concrete_func = self.load_model(model_name, model_link)
        print("Retrieve inputs info")
        inputs_info = self.get_inputs_info(concrete_func)
        print("Prepare input data")
        inputs = self.prepare_inputs(inputs_info)
        print("Convert the model into ov::Model")
        ov_model = self.convert_model(concrete_func)
        print("Infer the original model")
        fw_outputs = self.infer_fw_model(concrete_func, inputs)
        print("Infer ov::Model")
        ov_outputs = self.infer_ov_model(ov_model, inputs, ie_device)
        print("Compare TensorFlow and OpenVINO results")
        self.compare_results(fw_outputs, ov_outputs)
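
TestConvertModel is a template: concrete suites override load_model, get_inputs_info, and infer_fw_model, while input generation, conversion, OpenVINO inference, and comparison are shared. A minimal sketch of the shared helpers in isolation (no model involved; values are illustrative):

import numpy as np

from models_hub_common.test_convert_model import TestConvertModel

t = TestConvertModel()

# random data for a float32 input of shape [2, 3] and an int32 input of shape [4]
inputs = t.prepare_inputs([([2, 3], np.float32), ([4], np.int32)])
print([(a.shape, a.dtype) for a in inputs])  # [((2, 3), dtype('float32')), ((4,), dtype('int32'))]

# outputs that agree within the 5e-2 tolerance pass the comparison
t.compare_results({"out": inputs[0]}, {"out": inputs[0] + 1e-3})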

tests/model_hub_tests/models_hub_common/utils.py

@@ -0,0 +1,24 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import itertools

from models_hub_common.constants import test_device


def get_models_list(file_name: str):
    models = []
    with open(file_name) as f:
        for model_info in f:
            # each line has the form "<model name>,<model link>";
            # strip the trailing newline so the link is usable as a URL
            model_name, model_link = model_info.strip().split(',')
            models.append((model_name, model_link))
    return models


def get_params(ie_device=None):
    ie_device_params = ie_device if ie_device else test_device

    test_args = []
    for element in itertools.product(ie_device_params):
        test_args.append(element)
    return test_args
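
A quick sketch of what these helpers produce (the relative path is illustrative; the real list is the one-line precommit_models file added below):

from models_hub_common.utils import get_models_list, get_params

# get_models_list parses each "name,link" line into a tuple, e.g.
# [("vision/embedder/fungi_V2", "https://tfhub.dev/svampeatlas/...")]
models = get_models_list("precommit_models")

# get_params wraps each device into a 1-tuple for pytest parametrization
print(get_params(ie_device=["CPU"]))  # [('CPU',)]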

tests/model_hub_tests/tf_hub_tests/conftest.py

@@ -0,0 +1,12 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import inspect

from models_hub_common.utils import get_params


def pytest_generate_tests(metafunc):
    test_gen_attrs_names = list(inspect.signature(get_params).parameters)
    params = get_params()
    metafunc.parametrize(test_gen_attrs_names, params, scope="function")
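
pytest_generate_tests pulls the parameter names from get_params's signature, so every test in this directory automatically receives an ie_device argument per configured device. The introspection step alone:

import inspect

from models_hub_common.utils import get_params

print(list(inspect.signature(get_params).parameters))  # ['ie_device']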

tests/model_hub_tests/tf_hub_tests/precommit_models

@@ -0,0 +1 @@
vision/embedder/fungi_V2,https://tfhub.dev/svampeatlas/vision/embedder/fungi_V2/1?tf-hub-format=compressed

tests/model_hub_tests/tf_hub_tests/requirements.txt

@@ -0,0 +1,5 @@
-c ../../constraints.txt
numpy
pytest
tensorflow
tensorflow-hub

tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py

@@ -0,0 +1,74 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os

import numpy as np
import pytest
import tensorflow as tf
import tensorflow_hub as hub
from models_hub_common.test_convert_model import TestConvertModel
from models_hub_common.utils import get_models_list


class TestTFHubConvertModel(TestConvertModel):
    def load_model(self, model_name, model_link):
        load = hub.load(model_link)
        if 'default' in list(load.signatures.keys()):
            concrete_func = load.signatures['default']
        else:
            signature_keys = sorted(list(load.signatures.keys()))
            assert len(signature_keys) > 0, "No signatures for model {}, url {}".format(model_name, model_link)
            concrete_func = load.signatures[signature_keys[0]]
        return concrete_func

    def get_inputs_info(self, model_obj):
        inputs_info = []
        for input_info in model_obj.inputs:
            input_shape = []
            # pin unknown (None) dimensions to 1 for input generation
            for dim in input_info.shape.as_list():
                if dim is None:
                    input_shape.append(1)
                else:
                    input_shape.append(dim)
            type_map = {
                tf.float64: np.float64,
                tf.float32: np.float32,
                tf.int8: np.int8,
                tf.int16: np.int16,
                tf.int32: np.int32,
                tf.int64: np.int64,
                tf.uint8: np.uint8,
                tf.uint16: np.uint16,
                tf.string: str,
                tf.bool: bool,
            }
            assert input_info.dtype in type_map, "Unsupported input type: {}".format(input_info.dtype)
            inputs_info.append((input_shape, type_map[input_info.dtype]))
        return inputs_info

    def infer_fw_model(self, model_obj, inputs):
        tf_inputs = []
        for input_data in inputs:
            tf_inputs.append(tf.constant(input_data))
        output_dict = {}
        for out_name, out_value in model_obj(*tf_inputs).items():
            output_dict[out_name] = out_value.numpy()

        # map external tensor names to internal names
        # TODO: remove this workaround
        fw_outputs = {}
        for out_name, out_value in output_dict.items():
            mapped_name = out_name
            if out_name in model_obj.structured_outputs:
                mapped_name = model_obj.structured_outputs[out_name].name
            fw_outputs[mapped_name] = out_value
        return fw_outputs

    @pytest.mark.parametrize("model_name,model_link",
                             get_models_list(os.path.join(os.path.dirname(__file__), "precommit_models")))
    @pytest.mark.precommit
    def test_convert_model(self, model_name, model_link, ie_device):
        # no transpose is performed in the test in case of the new frontend
        self.run(model_name, model_link, ie_device)
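
As a usage note, get_inputs_info above pins every unknown (None) dimension to 1 before data generation; a toy concrete function (not a Hub model) exercises the same shape handling:

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([None, 224, 224, 3], tf.float32)])
def toy(images):
    return {"mean": tf.reduce_mean(images)}

concrete = toy.get_concrete_function()
shape = [1 if dim is None else dim for dim in concrete.inputs[0].shape.as_list()]
print(shape)  # [1, 224, 224, 3]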