[TF FE] Implement and refactor tensorflow layer tests (#8051)
* Revert submodule changes * Fix build on Win * Fix precommit: set correct shapes for broadcasting; disable check with ref for use_new_frontend mode * fix precommit * Fix precommits * Temporary skip new tests on GPU with FP16 * Resolve review comments, trigger CI * Resolve review comments * Resolve review comments
This commit is contained in:
parent
6ac18bbd1b
commit
00c7da0f5f
@ -1,8 +1,8 @@
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from extensions.ops.activation_ops import Abs, Elu, Erf, Exp, ReLU, LeakyReLU, LogicalNot, ReLU6, Sigmoid, \
|
||||
Sin, Sinh, Cos, Cosh, Tan, Tanh, Ceiling, Atanh, Acosh, Asinh, Mish, Log
|
||||
from extensions.ops.activation_ops import Abs, Acos, Asin, Atan, Elu, Erf, Exp, ReLU, LeakyReLU, LogicalNot, ReLU6, \
|
||||
Sigmoid, Sin, Sinh, Cos, Cosh, Tan, Tanh, Ceiling, Atanh, Acosh, Asinh, Mish, Log
|
||||
from mo.front.extractor import FrontExtractorOp
|
||||
|
||||
|
||||
@ -218,3 +218,33 @@ class LogExtractor(FrontExtractorOp):
|
||||
def extract(cls, node):
|
||||
Log.update_node_stat(node)
|
||||
return cls.enabled
|
||||
|
||||
|
||||
class AsinExtractor(FrontExtractorOp):
|
||||
op = 'Asin'
|
||||
enabled = True
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node):
|
||||
Asin.update_node_stat(node)
|
||||
return cls.enabled
|
||||
|
||||
|
||||
class AcosExtractor(FrontExtractorOp):
|
||||
op = 'Acos'
|
||||
enabled = True
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node):
|
||||
Acos.update_node_stat(node)
|
||||
return cls.enabled
|
||||
|
||||
|
||||
class AtanExtractor(FrontExtractorOp):
|
||||
op = 'Atan'
|
||||
enabled = True
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node):
|
||||
Atan.update_node_stat(node)
|
||||
return cls.enabled
|
||||
|
@ -24,14 +24,15 @@ class CommonLayerTest:
|
||||
def get_framework_results(self, inputs_dict, model_path):
|
||||
pass
|
||||
|
||||
def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp_dir, infer_timeout=60,
|
||||
enabled_transforms='', disabled_transforms='', **kwargs):
|
||||
def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp_dir, use_new_frontend=False,
|
||||
infer_timeout=60, enabled_transforms='', disabled_transforms='', **kwargs):
|
||||
"""
|
||||
:param enabled_transforms/disabled_transforms: string with idxs of transforms that should be enabled/disabled.
|
||||
Example: "transform_1,transform_2"
|
||||
"""
|
||||
model_path = self.produce_model_path(framework_model=framework_model, save_path=temp_dir)
|
||||
|
||||
self.use_new_frontend = use_new_frontend
|
||||
# TODO Pass environment variables via subprocess environment
|
||||
os.environ['MO_ENABLED_TRANSFORMS'] = enabled_transforms
|
||||
os.environ['MO_DISABLED_TRANSFORMS'] = disabled_transforms
|
||||
@ -50,6 +51,9 @@ class CommonLayerTest:
|
||||
if 'input_names' in kwargs and len(kwargs['input_names']):
|
||||
mo_params.update(dict(input=','.join(kwargs['input_names'])))
|
||||
|
||||
if use_new_frontend:
|
||||
mo_params["use_new_frontend"] = True
|
||||
|
||||
exit_code, stderr = generate_ir(**mo_params)
|
||||
|
||||
del os.environ['MO_ENABLED_TRANSFORMS']
|
||||
|
@ -4,10 +4,33 @@
|
||||
import os
|
||||
|
||||
from common.layer_test_class import CommonLayerTest
|
||||
|
||||
from common.utils.tf_utils import summarize_graph
|
||||
|
||||
|
||||
def transpose_nchw_to_nhwc(data, use_new_frontend):
|
||||
if use_new_frontend:
|
||||
return data
|
||||
|
||||
if len(data.shape) == 4: # reshaping for 4D tensors
|
||||
return data.transpose(0, 2, 3, 1)
|
||||
elif len(data.shape) == 5: # reshaping for 5D tensors
|
||||
return data.transpose(0, 2, 3, 4, 1)
|
||||
else:
|
||||
return data
|
||||
|
||||
|
||||
def transpose_nhwc_to_nchw(data, use_new_frontend):
|
||||
if use_new_frontend:
|
||||
return data
|
||||
|
||||
if len(data.shape) == 4: # reshaping for 4D tensors
|
||||
return data.transpose(0, 3, 1, 2) # 2, 0, 1
|
||||
elif len(data.shape) == 5: # reshaping for 5D tensors
|
||||
return data.transpose(0, 4, 1, 2, 3) # 3, 0, 1, 2
|
||||
else:
|
||||
return data
|
||||
|
||||
|
||||
def save_to_pb(tf_model, path_to_saved_tf_model):
|
||||
import tensorflow as tf
|
||||
tf.io.write_graph(tf_model, path_to_saved_tf_model, 'model.pb', False)
|
||||
@ -41,21 +64,11 @@ class CommonTFLayerTest(CommonLayerTest):
|
||||
input = dict()
|
||||
for key in inputs_dict.keys():
|
||||
data = inputs_dict.get(key)
|
||||
if len(data.shape) == 4: # reshaping for 4D tensors
|
||||
input[key+':0'] = data.transpose(0, 2, 3, 1)
|
||||
elif len(data.shape) == 5: # reshaping for 5D tensors
|
||||
input[key+':0'] = data.transpose(0, 2, 3, 4, 1)
|
||||
else:
|
||||
input[key+':0'] = data
|
||||
input[key + ':0'] = transpose_nchw_to_nhwc(data, self.use_new_frontend)
|
||||
tf_res = sess.run([out + ":0" for out in outputs_list], input)
|
||||
|
||||
result = dict()
|
||||
for i, output in enumerate(outputs_list):
|
||||
_tf_res = tf_res[i]
|
||||
if len(_tf_res.shape) == 4: # reshaping for 4D tensors
|
||||
result[output] = _tf_res.transpose(0, 3, 1, 2) # 2, 0, 1
|
||||
elif len(_tf_res.shape) == 5: # reshaping for 5D tensors
|
||||
result[output] = _tf_res.transpose(0, 4, 1, 2, 3) # 3, 0, 1, 2
|
||||
else:
|
||||
result[output] = _tf_res
|
||||
result[output] = transpose_nhwc_to_nchw(_tf_res, self.use_new_frontend)
|
||||
return result
|
||||
|
@ -7,6 +7,7 @@ import re
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
|
||||
from mo.ops.op import PermuteAttrs
|
||||
|
||||
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
|
||||
|
||||
@ -107,3 +108,23 @@ def summarize_graph(model_path, output_nodes_for_freeze=None, reshape_net=None):
|
||||
result['inputs'][layer]['shape'] = scoring_res[layer].shape
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def permute_nhwc_to_nchw(shape, use_new_frontend=False):
|
||||
if use_new_frontend:
|
||||
return shape
|
||||
perm = PermuteAttrs.get_nhwc_to_nchw_permutation(len(shape)).perm
|
||||
new_shape = np.array(shape)[perm]
|
||||
return new_shape
|
||||
|
||||
|
||||
def permute_nchw_to_nhwc(shape, use_new_frontend=False):
|
||||
if use_new_frontend:
|
||||
return shape
|
||||
perm = PermuteAttrs.get_nchw_to_nhwc_permutation(len(shape)).perm
|
||||
new_shape = np.array(shape)[perm]
|
||||
return new_shape
|
||||
|
||||
|
||||
def permute_axis(axis, permutation_inv):
|
||||
return permutation_inv[axis]
|
||||
|
@ -59,6 +59,11 @@ def pytest_addoption(parser):
|
||||
required=True,
|
||||
action="store",
|
||||
help="Version of IR to generate by Model Optimizer")
|
||||
parser.addoption(
|
||||
"--use_new_frontend",
|
||||
required=False,
|
||||
action="store_true",
|
||||
help="Use Model Optimizer with new FrontEnd")
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
@ -67,6 +72,12 @@ def ir_version(request):
|
||||
return request.config.getoption('ir_version')
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def use_new_frontend(request):
|
||||
"""Fixture function for command-line option."""
|
||||
return request.config.getoption('use_new_frontend')
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def temp_dir(request):
|
||||
"""Create directory for test purposes."""
|
||||
|
@ -1,22 +0,0 @@
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import numpy as np
|
||||
|
||||
from mo.ops.op import PermuteAttrs
|
||||
|
||||
|
||||
def permute_nhwc_to_nchw(shape):
|
||||
perm = PermuteAttrs.get_nhwc_to_nchw_permutation(len(shape)).perm
|
||||
new_shape = np.array(shape)[perm]
|
||||
return new_shape
|
||||
|
||||
|
||||
def permute_nchw_to_nhwc(shape):
|
||||
perm = PermuteAttrs.get_nchw_to_nhwc_permutation(len(shape)).perm
|
||||
new_shape = np.array(shape)[perm]
|
||||
return new_shape
|
||||
|
||||
|
||||
def permute_axis(axis, permutation_inv):
|
||||
return permutation_inv[axis]
|
@ -5,10 +5,11 @@ import numpy as np
|
||||
import pytest
|
||||
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
from common.utils.tf_utils import permute_nchw_to_nhwc
|
||||
|
||||
|
||||
class TestAdd(CommonTFLayerTest):
|
||||
def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version):
|
||||
def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
@ -30,11 +31,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
with tf.compat.v1.Session() as sess:
|
||||
tf_x_shape = x_shape.copy()
|
||||
tf_y_shape = y_shape.copy()
|
||||
# reshaping
|
||||
if len(tf_x_shape) >= 3:
|
||||
tf_x_shape.append(tf_x_shape.pop(1))
|
||||
if len(tf_y_shape) >= 3:
|
||||
tf_y_shape.append(tf_y_shape.pop(1))
|
||||
|
||||
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
|
||||
tf_y_shape = permute_nchw_to_nhwc(tf_y_shape, use_new_frontend)
|
||||
|
||||
x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
|
||||
constant_value = np.random.randint(-256, 256, tf_y_shape).astype(np.float32)
|
||||
@ -55,23 +54,6 @@ class TestAdd(CommonTFLayerTest):
|
||||
# Moreover, do not forget to validate ALL layer attributes!!!
|
||||
#
|
||||
|
||||
if len(add_shape) >= 3:
|
||||
# Permute add_shape to (N,C,...) format
|
||||
order = [0, len(add_shape) - 1] + list(range(1, len(add_shape) - 1))
|
||||
add_shape = [add_shape[i] for i in order]
|
||||
|
||||
y_shape_to_compare = tf_y_shape.copy()
|
||||
while len(y_shape_to_compare) < len(x_shape):
|
||||
# Expand shape of constant with 1
|
||||
y_shape_to_compare = [1] + y_shape_to_compare
|
||||
constant_value = np.expand_dims(constant_value, axis=0)
|
||||
|
||||
if len(y_shape_to_compare) >= 3:
|
||||
# Permute constant_value to (N,C,...) format for correct further reshape
|
||||
order = [0, len(y_shape_to_compare) - 1] + list(range(1, len(y_shape_to_compare) - 1))
|
||||
y_shape_to_compare = [y_shape_to_compare[i] for i in order]
|
||||
constant_value = np.transpose(constant_value, order)
|
||||
|
||||
ref_net = None
|
||||
|
||||
return tf_net, ref_net
|
||||
@ -87,9 +69,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_1D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_2D = [
|
||||
# Power
|
||||
@ -104,9 +86,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_2D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_3D = [
|
||||
# Power
|
||||
@ -122,9 +104,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_3D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_4D = [
|
||||
# Power
|
||||
@ -140,9 +122,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
# TODO mark as precommit (after successfully passing in nightly)
|
||||
@pytest.mark.parametrize("params", test_data_4D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_5D = [
|
||||
# Power
|
||||
@ -159,9 +141,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
# TODO mark as precommit (after successfully passing in nightly)
|
||||
@pytest.mark.parametrize("params", test_data_5D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
###############################################################################################
|
||||
# #
|
||||
@ -176,9 +158,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_broadcast_1D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_broadcast_2D = [
|
||||
# Power
|
||||
@ -195,9 +177,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_broadcast_2D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_broadcast_3D = [
|
||||
# Power
|
||||
@ -218,9 +200,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_broadcast_3D)
|
||||
@pytest.mark.nightly
|
||||
def test_add_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_broadcast_4D = [
|
||||
# Power
|
||||
@ -236,9 +218,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
# Eltwise
|
||||
dict(x_shape=[1, 3, 1, 1], y_shape=[3, 1]),
|
||||
# Eltwise
|
||||
dict(x_shape=[1, 3, 1, 2], y_shape=[3, 1, 2]),
|
||||
dict(x_shape=[1, 2, 1, 3], y_shape=[3, 1, 2]),
|
||||
# Eltwise
|
||||
dict(x_shape=[1, 3, 1, 2], y_shape=[1, 3, 2]),
|
||||
dict(x_shape=[1, 2, 1, 3], y_shape=[1, 3, 2]),
|
||||
# Eltwise
|
||||
dict(x_shape=[1, 3, 100, 224], y_shape=[1, 1, 1, 224]),
|
||||
# Eltwise
|
||||
@ -248,9 +230,9 @@ class TestAdd(CommonTFLayerTest):
|
||||
@pytest.mark.parametrize("params", test_data_broadcast_4D)
|
||||
@pytest.mark.nightly
|
||||
@pytest.mark.precommit
|
||||
def test_add_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_broadcast_5D = [
|
||||
# Power
|
||||
@ -264,7 +246,7 @@ class TestAdd(CommonTFLayerTest):
|
||||
# Eltwise
|
||||
dict(x_shape=[1, 3, 1, 1, 1], y_shape=[3, 1]),
|
||||
# Eltwise
|
||||
dict(x_shape=[1, 3, 1, 1, 2], y_shape=[1, 3, 2]),
|
||||
dict(x_shape=[1, 2, 1, 1, 3], y_shape=[1, 3, 2]),
|
||||
# Eltwise
|
||||
dict(x_shape=[1, 3, 5, 1, 2], y_shape=[5, 3, 2, 1]),
|
||||
# Eltwise
|
||||
@ -276,6 +258,7 @@ class TestAdd(CommonTFLayerTest):
|
||||
@pytest.mark.parametrize("params", test_data_broadcast_5D)
|
||||
@pytest.mark.nightly
|
||||
@pytest.mark.precommit
|
||||
def test_add_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version), ie_device, precision,
|
||||
ir_version=ir_version, temp_dir=temp_dir)
|
||||
def test_add_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
# we do not perform transpose in the test in case of new frontend
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision,
|
||||
ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
@ -7,7 +7,7 @@ from common.tf_layer_test_class import CommonTFLayerTest
|
||||
|
||||
|
||||
class TestBatchToSpace(CommonTFLayerTest):
|
||||
def create_batch_to_space_net(self, in_shape, crops_value, block_shape_value, out_shape, ir_version):
|
||||
def create_batch_to_space_net(self, in_shape, crops_value, block_shape_value, out_shape, ir_version, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
@ -28,7 +28,7 @@ class TestBatchToSpace(CommonTFLayerTest):
|
||||
x = tf.compat.v1.placeholder(tf.float32, in_shape, 'Input')
|
||||
crops = tf.constant(crops_value)
|
||||
block_shape = tf.constant(block_shape_value)
|
||||
tf.batch_to_space(x, block_shape, crops, name='Operation')
|
||||
tf.batch_to_space_nd(x, block_shape, crops, name='Operation')
|
||||
|
||||
tf.compat.v1.global_variables_initializer()
|
||||
tf_net = sess.graph_def
|
||||
@ -61,9 +61,9 @@ class TestBatchToSpace(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_4D)
|
||||
@pytest.mark.nightly
|
||||
def test_batch_to_space_4D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_batch_to_space_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_5D = [
|
||||
dict(in_shape=[72, 2, 1, 4, 2], block_shape_value=[3, 4, 2], crops_value=[[1, 2], [0, 0], [3, 0]],
|
||||
@ -75,6 +75,6 @@ class TestBatchToSpace(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_5D)
|
||||
@pytest.mark.nightly
|
||||
def test_batch_to_space_5D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_batch_to_space_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
@ -4,10 +4,11 @@
|
||||
import pytest
|
||||
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
from common.utils.tf_utils import permute_nchw_to_nhwc
|
||||
|
||||
|
||||
class TestBiasAdd(CommonTFLayerTest):
|
||||
def create_bias_add_placeholder_const_net(self, shape, ir_version):
|
||||
def create_bias_add_placeholder_const_net(self, shape, ir_version, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
@ -29,9 +30,8 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
# Create the graph and model
|
||||
with tf.compat.v1.Session() as sess:
|
||||
tf_x_shape = shape.copy()
|
||||
# reshaping
|
||||
if len(tf_x_shape) >= 3:
|
||||
tf_x_shape.append(tf_x_shape.pop(1))
|
||||
|
||||
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
|
||||
tf_y_shape = tf_x_shape[-1:]
|
||||
|
||||
x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
|
||||
@ -56,7 +56,7 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
|
||||
return tf_net, ref_net
|
||||
|
||||
def create_bias_add_2_consts_net(self, shape, ir_version):
|
||||
def create_bias_add_2_consts_net(self, shape, ir_version, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
@ -82,9 +82,8 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
# Create the graph and model
|
||||
with tf.compat.v1.Session() as sess:
|
||||
tf_x_shape = shape.copy()
|
||||
# reshaping
|
||||
if len(tf_x_shape) >= 3:
|
||||
tf_x_shape.append(tf_x_shape.pop(1))
|
||||
|
||||
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
|
||||
tf_y_shape = tf_x_shape[-1:]
|
||||
|
||||
constant_value_x = np.random.randint(-256, 256, tf_x_shape).astype(np.float32)
|
||||
@ -93,13 +92,10 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
y = tf.constant(constant_value_y)
|
||||
|
||||
add = tf.nn.bias_add(x, y, name="Operation")
|
||||
add_shape = add.shape.as_list()
|
||||
add_value = add.eval()
|
||||
|
||||
placeholder = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input') # Input_1 in graph_def
|
||||
|
||||
concat = tf.concat([placeholder, add], axis=tf_concat_axis, name='Operation')
|
||||
concat_shape = concat.shape.as_list()
|
||||
|
||||
tf.compat.v1.global_variables_initializer()
|
||||
tf_net = sess.graph_def
|
||||
@ -110,15 +106,6 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
# Moreover, do not forget to validate ALL layer attributes!!!
|
||||
#
|
||||
|
||||
# Format axis to positive value
|
||||
concat_ax = tf_concat_axis if tf_concat_axis >= 0 else tf_concat_axis + len(shape)
|
||||
if len(shape) >= 3:
|
||||
# Permute shapes to (N,C,...) format
|
||||
order = [0, len(concat_shape) - 1] + list(range(1, len(concat_shape) - 1))
|
||||
concat_shape = [concat_shape[i] for i in order]
|
||||
concat_ax = order.index(concat_ax)
|
||||
add_value = np.transpose(add_value, order)
|
||||
|
||||
ref_net = None
|
||||
|
||||
return tf_net, ref_net
|
||||
@ -130,15 +117,15 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_2D)
|
||||
@pytest.mark.nightly
|
||||
def test_bias_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_2D)
|
||||
@pytest.mark.nightly
|
||||
def test_bias_add_2_consts_2D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_2_consts_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_3D = [
|
||||
pytest.param(dict(shape=[1, 1, 224]), marks=pytest.mark.xfail(reason="*-19053")),
|
||||
@ -147,15 +134,15 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_3D)
|
||||
@pytest.mark.nightly
|
||||
def test_bias_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_3D)
|
||||
@pytest.mark.nightly
|
||||
def test_bias_add_2_consts_3D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_2_consts_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_4D = [
|
||||
dict(shape=[1, 1, 100, 224]),
|
||||
@ -165,15 +152,15 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
@pytest.mark.parametrize("params", test_data_4D)
|
||||
@pytest.mark.nightly
|
||||
@pytest.mark.precommit
|
||||
def test_bias_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_4D)
|
||||
@pytest.mark.nightly
|
||||
def test_bias_add_2_consts_4D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_2_consts_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_5D = [
|
||||
dict(shape=[1, 1, 50, 100, 224]),
|
||||
@ -183,12 +170,12 @@ class TestBiasAdd(CommonTFLayerTest):
|
||||
@pytest.mark.parametrize("params", test_data_5D)
|
||||
@pytest.mark.nightly
|
||||
@pytest.mark.precommit
|
||||
def test_bias_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_5D)
|
||||
@pytest.mark.nightly
|
||||
def test_bias_add_2_consts_5D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bias_add_2_consts_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
129
tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py
Normal file
129
tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py
Normal file
@ -0,0 +1,129 @@
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
from common.utils.tf_utils import permute_nchw_to_nhwc
|
||||
|
||||
|
||||
def generate_input(op_type, size):
|
||||
narrow_borders = ["Pow"]
|
||||
|
||||
logical_type = ['LogicalAnd', 'LogicalOr', 'LogicalXor']
|
||||
|
||||
# usual function domain
|
||||
lower = -256
|
||||
upper = 256
|
||||
|
||||
# specific domains
|
||||
if op_type in narrow_borders:
|
||||
lower = 0
|
||||
upper = 16
|
||||
|
||||
if op_type in logical_type:
|
||||
return np.random.randint(0, 1, size).astype(np.bool)
|
||||
elif op_type in narrow_borders:
|
||||
return np.random.uniform(lower, upper, size).astype(np.float32)
|
||||
else:
|
||||
return np.random.uniform(lower, upper, size).astype(np.float32)
|
||||
|
||||
|
||||
class TestBinaryOps(CommonTFLayerTest):
|
||||
def _prepare_input(self, inputs_dict):
|
||||
for input in inputs_dict.keys():
|
||||
inputs_dict[input] = generate_input(self.current_op_type, inputs_dict[input])
|
||||
return inputs_dict
|
||||
|
||||
def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, op_type, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
Placeholder->BinaryOp => Placeholder->Eltwise or Power or ScaleShift
|
||||
/ /
|
||||
Const-------/ Const-------/
|
||||
|
||||
"""
|
||||
|
||||
self.current_op_type = op_type
|
||||
|
||||
#
|
||||
# Create Tensorflow model
|
||||
#
|
||||
import tensorflow as tf
|
||||
|
||||
op_type_to_tf = {
|
||||
'Add': tf.math.add,
|
||||
'Sub': tf.math.subtract,
|
||||
'Mul': tf.math.multiply,
|
||||
'Div': tf.math.divide,
|
||||
'RealDiv': tf.realdiv,
|
||||
'SquaredDifference': tf.math.squared_difference,
|
||||
'Pow': tf.math.pow,
|
||||
'Maximum': tf.math.maximum,
|
||||
'Minimum': tf.math.minimum,
|
||||
'Equal': tf.math.equal,
|
||||
'NotEqual': tf.math.not_equal,
|
||||
'Mod': tf.math.mod,
|
||||
'Greater': tf.math.greater,
|
||||
'GreaterEqual': tf.math.greater_equal,
|
||||
'Less': tf.math.less,
|
||||
'LessEqual': tf.math.less_equal,
|
||||
'LogicalAnd': tf.math.logical_and,
|
||||
'LogicalOr': tf.math.logical_or,
|
||||
'LogicalXor': tf.math.logical_xor,
|
||||
'FloorMod': tf.math.floormod,
|
||||
}
|
||||
|
||||
type = np.float32
|
||||
if op_type in ["LogicalAnd", "LogicalOr", "LogicalXor"]:
|
||||
type = np.bool
|
||||
tf.compat.v1.reset_default_graph()
|
||||
# Create the graph and model
|
||||
with tf.compat.v1.Session() as sess:
|
||||
tf_x_shape = x_shape.copy()
|
||||
tf_y_shape = y_shape.copy()
|
||||
|
||||
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
|
||||
tf_y_shape = permute_nchw_to_nhwc(tf_y_shape, use_new_frontend)
|
||||
|
||||
x = tf.compat.v1.placeholder(type, tf_x_shape, 'Input')
|
||||
constant_value = generate_input(op_type, tf_y_shape)
|
||||
if (constant_value == 0).all():
|
||||
# Avoid elimination of the layer from IR
|
||||
constant_value = constant_value + 1
|
||||
y = tf.constant(constant_value, dtype=type)
|
||||
|
||||
op = op_type_to_tf[op_type](x, y, name="Operation")
|
||||
|
||||
tf.compat.v1.global_variables_initializer()
|
||||
tf_net = sess.graph_def
|
||||
|
||||
#
|
||||
# Create reference IR net
|
||||
# Please, specify 'type': 'Input' for input node
|
||||
# Moreover, do not forget to validate ALL layer attributes!!!
|
||||
#
|
||||
|
||||
ref_net = None
|
||||
|
||||
return tf_net, ref_net
|
||||
|
||||
test_data_precommits = [dict(x_shape=[2, 3, 4], y_shape=[2, 3, 4]),
|
||||
dict(x_shape=[2, 3, 4, 5], y_shape=[2, 3, 4, 5])]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_precommits)
|
||||
@pytest.mark.parametrize("op_type",
|
||||
['Add', 'Sub', 'Mul', 'Div', 'RealDiv', 'SquaredDifference', 'Pow', 'Maximum', 'Minimum',
|
||||
'Equal', 'NotEqual', 'Mod', 'Greater', 'GreaterEqual', 'Less', 'LessEqual',
|
||||
'LogicalAnd', 'LogicalOr', 'LogicalXor', 'FloorMod'])
|
||||
@pytest.mark.nightly
|
||||
@pytest.mark.precommit
|
||||
def test_binary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend):
|
||||
if ie_device == 'GPU' and precision == "FP16":
|
||||
pytest.skip("BinaryOps tests temporary skipped on GPU with FP16 precision."
|
||||
"Several tests don't pass accuracy checks.")
|
||||
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, op_type=op_type,
|
||||
use_new_frontend=use_new_frontend), ie_device, precision,
|
||||
ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
@ -10,7 +10,7 @@ from unit_tests.utils.graph import build_graph
|
||||
|
||||
|
||||
class TestBucketize(CommonTFLayerTest):
|
||||
def create_bucketize_net(self, input_shape, input_type, boundaries_size, ir_version):
|
||||
def create_bucketize_net(self, input_shape, input_type, boundaries_size, ir_version, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net: IR net:
|
||||
Input => Input Boundaries
|
||||
@ -30,7 +30,7 @@ class TestBucketize(CommonTFLayerTest):
|
||||
# create reference IR net
|
||||
ref_net = None
|
||||
|
||||
if check_ir_version(10, None, ir_version):
|
||||
if check_ir_version(10, None, ir_version) and not use_new_frontend:
|
||||
nodes_attributes = {
|
||||
'input': {'kind': 'op', 'type': 'Parameter'},
|
||||
'input_data': {'shape': input_shape, 'kind': 'data'},
|
||||
@ -64,9 +64,9 @@ class TestBucketize(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_float32)
|
||||
@pytest.mark.nightly
|
||||
def test_bucketize_float32(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bucketize_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bucketize_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bucketize_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_int32 = [
|
||||
dict(input_shape=[5], input_type=tf.int32, boundaries_size=1),
|
||||
@ -78,6 +78,6 @@ class TestBucketize(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_int32)
|
||||
@pytest.mark.nightly
|
||||
def test_bucketize_int32(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_bucketize_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_bucketize_int32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_bucketize_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
@ -1,78 +0,0 @@
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import pytest
|
||||
|
||||
from common.layer_test_class import check_ir_version
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
from unit_tests.utils.graph import build_graph
|
||||
|
||||
|
||||
class TestCeil(CommonTFLayerTest):
|
||||
def create_ceil_net(self, shape, ir_version):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
Input->Ceil => Input->Ceil
|
||||
|
||||
"""
|
||||
|
||||
#
|
||||
# Create Tensorflow model
|
||||
#
|
||||
|
||||
import tensorflow as tf
|
||||
|
||||
tf.compat.v1.reset_default_graph()
|
||||
|
||||
# Create the graph and model
|
||||
with tf.compat.v1.Session() as sess:
|
||||
shapes = shape.copy()
|
||||
# reshaping
|
||||
if len(shapes) >= 3:
|
||||
shapes.append(shapes.pop(1))
|
||||
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
|
||||
|
||||
tf.math.ceil(input, name='Operation')
|
||||
|
||||
tf.compat.v1.global_variables_initializer()
|
||||
tf_net = sess.graph_def
|
||||
|
||||
ref_net = None
|
||||
|
||||
if check_ir_version(10, None, ir_version):
|
||||
nodes_attributes = {
|
||||
'input': {'kind': 'op', 'type': 'Parameter'},
|
||||
'input_data': {'shape': shape, 'kind': 'data'},
|
||||
'Ceiling': {'kind': 'op', 'type': 'Ceiling'},
|
||||
'Ceiling_data': {'shape': shape, 'kind': 'data'},
|
||||
'result': {'kind': 'op', 'type': 'Result'}
|
||||
}
|
||||
|
||||
ref_net = build_graph(nodes_attributes,
|
||||
[('input', 'input_data'),
|
||||
('input_data', 'Ceiling'),
|
||||
('Ceiling', 'Ceiling_data'),
|
||||
('Ceiling_data', 'result')
|
||||
])
|
||||
|
||||
return tf_net, ref_net
|
||||
|
||||
test_data_precommit = [dict(shape=[3, 2, 3, 7, 6])]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_precommit)
|
||||
@pytest.mark.precommit
|
||||
def test_ceil_precommit(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_ceil_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
|
||||
test_data = [dict(shape=[1]),
|
||||
dict(shape=[2, 5]),
|
||||
dict(shape=[5, 3, 7, 4]),
|
||||
dict(shape=[3, 2, 3, 7, 6])]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data)
|
||||
@pytest.mark.nightly
|
||||
def test_ceil(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_ceil_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
@ -4,10 +4,10 @@
|
||||
import pytest
|
||||
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
|
||||
from common.utils.tf_utils import permute_nchw_to_nhwc
|
||||
|
||||
class TestConcat(CommonTFLayerTest):
|
||||
def create_concat_net(self, shape, axis, ir_version):
|
||||
def create_concat_net(self, shape, axis, ir_version, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
@ -28,15 +28,14 @@ class TestConcat(CommonTFLayerTest):
|
||||
|
||||
ax = axis
|
||||
|
||||
input_shape_x = shape.copy()
|
||||
# reshaping
|
||||
if len(input_shape_x) >= 3:
|
||||
input_shape_x.append(input_shape_x.pop(1))
|
||||
tf_x_shape = shape.copy()
|
||||
|
||||
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
|
||||
|
||||
# TODO: add concat with const inputs to check fusing (as in ONNX)
|
||||
|
||||
x = tf.compat.v1.placeholder(tf.float32, input_shape_x, 'Input')
|
||||
y = tf.compat.v1.placeholder(tf.float32, input_shape_x, 'Input') # Input_1 in graph_def
|
||||
x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
|
||||
y = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input') # Input_1 in graph_def
|
||||
|
||||
concat = tf.concat([x, y], axis=ax, name='Operation')
|
||||
concat_shape = concat.shape.as_list()
|
||||
@ -50,14 +49,6 @@ class TestConcat(CommonTFLayerTest):
|
||||
# Moreover, do not forget to validate ALL layer attributes!!!
|
||||
#
|
||||
|
||||
# Format axis to positive value
|
||||
concat_ax = axis if axis >= 0 else axis + len(shape)
|
||||
if len(shape) >= 3:
|
||||
# Permute shape to (N,C,...) format and compute correct axis value
|
||||
order = [0, len(concat_shape) - 1] + list(range(1, len(concat_shape) - 1))
|
||||
concat_shape = [concat_shape[i] for i in order]
|
||||
concat_ax = order.index(concat_ax)
|
||||
|
||||
ref_net = None
|
||||
|
||||
return tf_net, ref_net
|
||||
@ -69,18 +60,18 @@ class TestConcat(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_1D)
|
||||
@pytest.mark.nightly
|
||||
def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_2D = [dict(shape=[1, 224], axis=0),
|
||||
dict(shape=[1, 224], axis=-1)]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_2D)
|
||||
@pytest.mark.nightly
|
||||
def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_3D = [pytest.param(dict(shape=[1, 3, 224], axis=0), marks=pytest.mark.xfail(reason="*-19053")),
|
||||
pytest.param(dict(shape=[1, 3, 224], axis=-1), marks=pytest.mark.xfail(reason="*-19053")),
|
||||
@ -88,9 +79,9 @@ class TestConcat(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_3D)
|
||||
@pytest.mark.nightly
|
||||
def test_concat_3D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_concat_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_4D = [dict(shape=[1, 3, 100, 224], axis=0),
|
||||
dict(shape=[1, 3, 100, 224], axis=-1),
|
||||
@ -99,9 +90,9 @@ class TestConcat(CommonTFLayerTest):
|
||||
@pytest.mark.parametrize("params", test_data_4D)
|
||||
@pytest.mark.nightly
|
||||
@pytest.mark.precommit
|
||||
def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_5D = [dict(shape=[1, 3, 50, 100, 224], axis=0),
|
||||
dict(shape=[1, 3, 50, 100, 224], axis=-1),
|
||||
@ -109,6 +100,6 @@ class TestConcat(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_5D)
|
||||
@pytest.mark.nightly
|
||||
def test_concat_5D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_concat_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
@ -1,88 +0,0 @@
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import pytest
|
||||
from common.layer_test_class import check_ir_version
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
from unit_tests.utils.graph import build_graph
|
||||
|
||||
|
||||
class TestELU(CommonTFLayerTest):
|
||||
def create_elu_net(self, shape, ir_version):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
Input->ELU => Input->ELU
|
||||
|
||||
"""
|
||||
|
||||
#
|
||||
# Create Tensorflow model
|
||||
#
|
||||
|
||||
import tensorflow as tf
|
||||
|
||||
tf.compat.v1.reset_default_graph()
|
||||
|
||||
# Create the graph and model
|
||||
with tf.compat.v1.Session() as sess:
|
||||
|
||||
shapes = shape.copy()
|
||||
# reshaping
|
||||
if len(shapes) >= 4:
|
||||
shapes.append(shapes.pop(1))
|
||||
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
|
||||
|
||||
tf.nn.elu(input, name='Operation')
|
||||
|
||||
tf.compat.v1.global_variables_initializer()
|
||||
tf_net = sess.graph_def
|
||||
|
||||
#
|
||||
# Create reference IR net
|
||||
# Please, specify 'type': 'Input' for input node
|
||||
# Moreover, do not forget to validate ALL layer attributes!!!
|
||||
#
|
||||
|
||||
ref_net = None
|
||||
|
||||
if check_ir_version(10, None, ir_version):
|
||||
nodes_attributes = {
|
||||
'input': {'kind': 'op', 'type': 'Parameter'},
|
||||
'input_data': {'shape': shape, 'kind': 'data'},
|
||||
'ELU': {'kind': 'op', 'type': 'Elu'},
|
||||
'ELU_data': {'shape': shape, 'kind': 'data'},
|
||||
'result': {'kind': 'op', 'type': 'Result'}
|
||||
}
|
||||
|
||||
ref_net = build_graph(nodes_attributes,
|
||||
[('input', 'input_data'),
|
||||
('input_data', 'ELU'),
|
||||
('ELU', 'ELU_data'),
|
||||
('ELU_data', 'result')
|
||||
])
|
||||
|
||||
return tf_net, ref_net
|
||||
|
||||
test_data_precommit = [dict(shape=[4, 6, 8, 10, 12])]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_precommit)
|
||||
@pytest.mark.precommit
|
||||
def test_elu_precommit(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
if ie_device == 'GPU':
|
||||
pytest.skip("5D tensors is not supported on GPU")
|
||||
self._test(*self.create_elu_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
|
||||
test_data = [dict(shape=[10, 12]),
|
||||
dict(shape=[8, 10, 12]),
|
||||
dict(shape=[6, 8, 10, 12]),
|
||||
dict(shape=[4, 6, 8, 10, 12])]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data)
|
||||
@pytest.mark.nightly
|
||||
def test_elu(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
if ie_device == 'GPU':
|
||||
pytest.skip("5D tensors is not supported on GPU")
|
||||
self._test(*self.create_elu_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
@ -4,10 +4,11 @@
|
||||
import pytest
|
||||
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
from common.utils.tf_utils import permute_nchw_to_nhwc
|
||||
|
||||
|
||||
class TestEltwise(CommonTFLayerTest):
|
||||
def create_eltwise_net(self, shape, operation, ir_version):
|
||||
def create_eltwise_net(self, shape, operation, ir_version, use_new_frontend):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
@ -26,13 +27,12 @@ class TestEltwise(CommonTFLayerTest):
|
||||
# Create the graph and model
|
||||
with tf.compat.v1.Session() as sess:
|
||||
|
||||
shapes = shape.copy()
|
||||
# reshaping
|
||||
if len(shapes) >= 4:
|
||||
shapes.append(shapes.pop(1))
|
||||
tf_x_shape = shape.copy()
|
||||
|
||||
x = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
|
||||
y = tf.compat.v1.placeholder(tf.float32, shapes, 'Input') # Input_1 in graph_def
|
||||
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
|
||||
|
||||
x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
|
||||
y = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input') # Input_1 in graph_def
|
||||
|
||||
if operation == 'sum':
|
||||
tf.add(x, y, name='Operation')
|
||||
@ -62,9 +62,9 @@ class TestEltwise(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data)
|
||||
@pytest.mark.nightly
|
||||
def test_eltwise(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_eltwise_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
def test_eltwise(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_eltwise_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
||||
test_data_5D = []
|
||||
for operation in ['sum', 'max', 'mul']:
|
||||
@ -72,8 +72,8 @@ class TestEltwise(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_5D)
|
||||
@pytest.mark.precommit
|
||||
def test_eltwise_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
def test_eltwise_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
if ie_device == 'GPU':
|
||||
pytest.skip("5D tensors is not supported on GPU")
|
||||
self._test(*self.create_eltwise_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
self._test(*self.create_eltwise_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
@ -35,7 +35,7 @@ class TestFakeQuantize(CommonTFLayerTest):
|
||||
expected_nudged_input_max + expected_step
|
||||
])}
|
||||
|
||||
def create_fake_quantize_net(self, il, ih, num_bits, narrow_range, nudged_il, nudged_ih, expected_step, ir_version):
|
||||
def create_fake_quantize_net(self, il, ih, num_bits, narrow_range, nudged_il, nudged_ih, expected_step, ir_version, use_new_frontend):
|
||||
# original tf model
|
||||
import tensorflow as tf
|
||||
tf.compat.v1.reset_default_graph()
|
||||
@ -50,7 +50,7 @@ class TestFakeQuantize(CommonTFLayerTest):
|
||||
|
||||
# reference graph to compare with IR
|
||||
ref_net = None
|
||||
if check_ir_version(10, None, ir_version):
|
||||
if check_ir_version(10, None, ir_version) and not use_new_frontend:
|
||||
levels = 2 ** num_bits - int(narrow_range)
|
||||
|
||||
# data (shape, value) -> const (shape, vale) -> data (shape, no value)
|
||||
@ -120,6 +120,6 @@ class TestFakeQuantize(CommonTFLayerTest):
|
||||
|
||||
@pytest.mark.parametrize("params", test_data)
|
||||
@pytest.mark.nightly
|
||||
def test_fake_quantize(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_fake_quantize_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
|
||||
kwargs_to_prepare_input=params, temp_dir=temp_dir)
|
||||
def test_fake_quantize(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
|
||||
self._test(*self.create_fake_quantize_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version,
|
||||
kwargs_to_prepare_input=params, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
|
||||
|
@ -1,78 +0,0 @@
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import pytest
|
||||
|
||||
from common.layer_test_class import check_ir_version
|
||||
from common.tf_layer_test_class import CommonTFLayerTest
|
||||
from unit_tests.utils.graph import build_graph
|
||||
|
||||
|
||||
class TestFloor(CommonTFLayerTest):
|
||||
def create_floor_net(self, shape, ir_version):
|
||||
"""
|
||||
Tensorflow net IR net
|
||||
|
||||
Input->Floor => Input->Floor
|
||||
|
||||
"""
|
||||
|
||||
#
|
||||
# Create Tensorflow model
|
||||
#
|
||||
|
||||
import tensorflow as tf
|
||||
|
||||
tf.compat.v1.reset_default_graph()
|
||||
|
||||
# Create the graph and model
|
||||
with tf.compat.v1.Session() as sess:
|
||||
shapes = shape.copy()
|
||||
# reshaping
|
||||
if len(shapes) >= 3:
|
||||
shapes.append(shapes.pop(1))
|
||||
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
|
||||
|
||||
tf.floor(input, name='Operation')
|
||||
|
||||
tf.compat.v1.global_variables_initializer()
|
||||
tf_net = sess.graph_def
|
||||
|
||||
ref_net = None
|
||||
|
||||
if check_ir_version(10, None, ir_version):
|
||||
nodes_attributes = {
|
||||
'input': {'kind': 'op', 'type': 'Parameter'},
|
||||
'input_data': {'shape': shape, 'kind': 'data'},
|
||||
'Floor': {'kind': 'op', 'type': 'Floor'},
|
||||
'Floor_data': {'shape': shape, 'kind': 'data'},
|
||||
'result': {'kind': 'op', 'type': 'Result'}
|
||||
}
|
||||
|
||||
ref_net = build_graph(nodes_attributes,
|
||||
[('input', 'input_data'),
|
||||
('input_data', 'Floor'),
|
||||
('Floor', 'Floor_data'),
|
||||
('Floor_data', 'result')
|
||||
])
|
||||
|
||||
return tf_net, ref_net
|
||||
|
||||
test_data_precommit = [dict(shape=[3, 2, 3, 7, 6])]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_precommit)
|
||||
@pytest.mark.precommit
|
||||
def test_floor_precommit(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_floor_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
|
||||
test_data = [dict(shape=[1]),
|
||||
dict(shape=[2, 5]),
|
||||
dict(shape=[5, 3, 7, 4]),
|
||||
dict(shape=[3, 2, 3, 7, 6])]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data)
|
||||
@pytest.mark.nightly
|
||||
def test_floor(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
self._test(*self.create_floor_net(**params, ir_version=ir_version),
|
||||
ie_device, precision, ir_version, temp_dir=temp_dir)
|
@ -11,7 +11,7 @@ class TestGather(CommonTFLayerTest):
def create_indices_constant(self):
pass

def create_gather_net(self, data_shape, indices, axis, batch_dims, **kwargs):
def create_gather_net(self, data_shape, indices, axis, batch_dims, use_new_frontend, **kwargs):
import tensorflow as tf

tf.compat.v1.reset_default_graph()
@ -36,9 +36,9 @@ class TestGather(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_gather(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_gather_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_gather(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_gather_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_nightly = [
dict(data_shape=[2, 3], axis=1, indices=[0, 2], batch_dims=0),
@ -56,6 +56,6 @@ class TestGather(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_nightly)
@pytest.mark.nightly
def test_gather_nightly(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_gather_net(**params),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_gather_nightly(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_gather_net(**params, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -5,11 +5,12 @@ import pytest

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from unit_tests.utils.graph import build_graph


class TestIdentity(CommonTFLayerTest):
def create_identity_net(self, shape, ir_version):
def create_identity_net(self, shape, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -27,12 +28,11 @@ class TestIdentity(CommonTFLayerTest):

# Create the graph and model
with tf.compat.v1.Session() as sess:
x_shape = shape.copy()
# reshaping
if len(x_shape) >= 3:
x_shape.append(x_shape.pop(1))
tf_x_shape = shape.copy()

x = tf.compat.v1.placeholder(tf.float32, x_shape, 'Input')
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
id = tf.identity(x, name="Operation")
tf.nn.relu(id, name='Operation')

@ -47,7 +47,7 @@ class TestIdentity(CommonTFLayerTest):

ref_net = None

if check_ir_version(10, None, ir_version):
if check_ir_version(10, None, ir_version) and not use_new_frontend:
nodes_attributes = {
'inputX': {'kind': 'op', 'type': 'Parameter'},
'inputX_data': {'shape': shape, 'kind': 'data'},
@ -68,9 +68,9 @@ class TestIdentity(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_identity_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_identity_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_identity_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_identity_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@ -80,6 +80,6 @@ class TestIdentity(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_identity(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_identity_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_identity(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_identity_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -1,79 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph


class TestLog(CommonTFLayerTest):
def create_log_net(self, shape, ir_version):
"""
Tensorflow net IR net

Input->Log => Input->Log

"""

#
# Create Tensorflow model
#

import tensorflow as tf

tf.compat.v1.reset_default_graph()

# Create the graph and model
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) >= 3:
shapes.append(shapes.pop(1))
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')

tf.math.log(input, name='Operation')

tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def

ref_net = None

if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'log': {'kind': 'op', 'type': 'Log'},
'log_data': {'shape': shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
}

ref_net = build_graph(nodes_attributes,
[('input', 'input_data'),
('input_data', 'log'),
('log', 'log_data'),
('log_data', 'result')
])

return tf_net, ref_net

test_data_precommit = [
pytest.param(dict(shape=[3, 2, 3, 7, 6]), marks=pytest.mark.skip(reason="Skipped until fixed"))]

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_log_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_log_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)

test_data = [dict(shape=[1]),
dict(shape=[2, 5]),
dict(shape=[5, 3, 7, 4]),
dict(shape=[3, 2, 3, 7, 6])]

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_log(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_log_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
@ -7,12 +7,13 @@ import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph


class TestLogSoftmax(CommonTFLayerTest):
def create_log_softmax_net(self, shape, reduction_axis, ir_version):
def create_log_softmax_net(self, shape, reduction_axis, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -30,11 +31,10 @@ class TestLogSoftmax(CommonTFLayerTest):

# Create the graph and model
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) >= 3:
shapes.append(shapes.pop(1))
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
tf_x_shape = shape.copy()

tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
tf.nn.log_softmax(input, name='Operation', axis=reduction_axis)
else:
@ -57,7 +57,7 @@ class TestLogSoftmax(CommonTFLayerTest):
reduce_sum_shape[reduction_axis] = 1

converted_shape = shape if rank != 1 else shape[0]
if check_ir_version(10, None, ir_version):
if check_ir_version(10, None, ir_version) and not use_new_frontend:
ref_nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter', 'shape': converted_shape},
'input_data': {'shape': shape, 'kind': 'data', 'value': None},
@ -122,9 +122,9 @@ class TestLogSoftmax(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_log_softmax_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_log_softmax_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data = [dict(shape=[1], reduction_axis=-1),
dict(shape=[2, 5], reduction_axis=-1),
@ -133,6 +133,6 @@ class TestLogSoftmax(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_log_softmax(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_log_softmax(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -5,10 +5,11 @@ import numpy as np
import pytest

from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestMul(CommonTFLayerTest):
def create_mul_placeholder_const_net(self, x_shape, y_shape, ir_version):
def create_mul_placeholder_const_net(self, x_shape, y_shape, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -30,11 +31,9 @@ class TestMul(CommonTFLayerTest):
with tf.compat.v1.Session() as sess:
tf_x_shape = x_shape.copy()
tf_y_shape = y_shape.copy()
# reshaping
if len(tf_x_shape) >= 3:
tf_x_shape.append(tf_x_shape.pop(1))
if len(tf_y_shape) >= 3:
tf_y_shape.append(tf_y_shape.pop(1))

tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
tf_y_shape = permute_nchw_to_nhwc(tf_y_shape, use_new_frontend)

x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
constant_value = np.random.randint(-255, 255, tf_y_shape).astype(np.float32)
@ -44,7 +43,6 @@ class TestMul(CommonTFLayerTest):
y = tf.constant(constant_value)

mul = tf.multiply(x, y, name="Operation")
mul_shape = mul.shape.as_list()

tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@ -55,23 +53,6 @@ class TestMul(CommonTFLayerTest):
# Moreover, do not forget to validate ALL layer attributes!!!
#

if len(mul_shape) >= 3:
# Permute mul_shape to (N,C,...) format
order = [0, len(mul_shape) - 1] + list(range(1, len(mul_shape) - 1))
mul_shape = [mul_shape[i] for i in order]

y_shape_to_compare = tf_y_shape.copy()
while len(y_shape_to_compare) < len(x_shape):
# Expand shape of constant with 1
y_shape_to_compare = [1] + y_shape_to_compare
constant_value = np.expand_dims(constant_value, axis=0)

if len(y_shape_to_compare) >= 3:
# Permute constant_value to (N,C,...) format for correct further reshape
order = [0, len(y_shape_to_compare) - 1] + list(range(1, len(y_shape_to_compare) - 1))
y_shape_to_compare = [y_shape_to_compare[i] for i in order]
constant_value = np.transpose(constant_value, order)

ref_net = None

return tf_net, ref_net
@ -87,9 +68,9 @@ class TestMul(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_mul_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_2D = [
# Power
@ -104,9 +85,9 @@ class TestMul(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_mul_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_3D = [
# Power
@ -122,9 +103,9 @@ class TestMul(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_mul_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_4D = [
# Power
@ -139,9 +120,9 @@ class TestMul(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_mul_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = [
# Power
@ -158,9 +139,9 @@ class TestMul(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_mul_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

###############################################################################################
# #
@ -174,9 +155,9 @@ class TestMul(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_broadcast_1D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_2D = [
# Power
@ -193,9 +174,9 @@ class TestMul(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_broadcast_2D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_3D = [
# Power
@ -217,9 +198,9 @@ class TestMul(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_3D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_4D = [
# Power
@ -235,9 +216,9 @@ class TestMul(CommonTFLayerTest):
# Eltwise
dict(x_shape=[1, 3, 1, 1], y_shape=[3, 1]),
# Eltwise
dict(x_shape=[1, 3, 1, 2], y_shape=[3, 1, 2]),
dict(x_shape=[1, 2, 1, 3], y_shape=[3, 1, 2]),
# Eltwise
dict(x_shape=[1, 3, 1, 2], y_shape=[1, 3, 2]),
dict(x_shape=[1, 2, 1, 3], y_shape=[1, 3, 2]),
# Eltwise
dict(x_shape=[1, 3, 100, 224], y_shape=[1, 1, 1, 224]),
# Eltwise
@ -247,9 +228,9 @@ class TestMul(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_mul_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_5D = [
# Power
@ -274,6 +255,6 @@ class TestMul(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_broadcast_5D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -7,7 +7,7 @@ import pytest
from common.tf_layer_test_class import CommonTFLayerTest
from mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph
from tensorflow_tests.permutation_utils import permute_nchw_to_nhwc, permute_nhwc_to_nchw
from common.utils.tf_utils import permute_nchw_to_nhwc

class TestNormalizeL2(CommonTFLayerTest):
@ -33,7 +33,7 @@ class TestNormalizeL2(CommonTFLayerTest):
return tf_net

@staticmethod
def create_normalize_l2_net_fusable(shape, axes, output_axes, ir_version):
def create_normalize_l2_net_fusable(shape, axes, output_axes, ir_version, use_new_frontend):
tf_net = TestNormalizeL2.build_tf_graph(shape, axes)

nodes_attributes = {
@ -57,16 +57,18 @@ class TestNormalizeL2(CommonTFLayerTest):
('normalize_l2_data', 'result'),
])

if use_new_frontend:
ref_net = None
return tf_net, ref_net

@staticmethod
def create_normalize_l2_net_non_fusable(shape, axes, output_axes, ir_version):
def create_normalize_l2_net_non_fusable(shape, axes, output_axes, ir_version, use_new_frontend):
tf_net = TestNormalizeL2.build_tf_graph(shape, axes)

reduced_shape = permute_nchw_to_nhwc(shape).copy()
for axis in axes:
reduced_shape[axis] = 1
reduced_shape = permute_nhwc_to_nchw(reduced_shape)
reduced_shape = permute_nchw_to_nhwc(reduced_shape)

eltwise_shapes = int64_array(np.ones(len(shape)))
nodes_attributes = {
@ -136,6 +138,8 @@ class TestNormalizeL2(CommonTFLayerTest):
('multiply_data', 'result'),
])

if use_new_frontend:
ref_net = None
return tf_net, ref_net

test_data_fusable_precommit = [
@ -147,9 +151,9 @@ class TestNormalizeL2(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_fusable_precommit)
@pytest.mark.precommit
def test_NormalizeL2_fusable_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_NormalizeL2_fusable_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_non_fusable_precommit = [
pytest.param(dict(shape=[2, 3, 5], axes=[0, 1, 2], output_axes=[0, 1, 2]),
@ -162,10 +166,10 @@ class TestNormalizeL2(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_non_fusable_precommit)
@pytest.mark.precommit
def test_NormalizeL2_non_fusable_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version),
def test_NormalizeL2_non_fusable_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_fusable = [
dict(shape=[5, 6], axes=[1], output_axes=[1]),
@ -178,9 +182,9 @@ class TestNormalizeL2(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_fusable)
@pytest.mark.nightly
def test_NormalizeL2_fusable(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_NormalizeL2_fusable(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_non_fusable = [
dict(shape=[5], axes=[0], output_axes=[0]),
@ -201,7 +205,7 @@ class TestNormalizeL2(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_non_fusable)
@pytest.mark.nightly
def test_NormalizeL2_non_fusable(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version),
def test_NormalizeL2_non_fusable(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -4,12 +4,12 @@
import numpy as np
import pytest
from common.tf_layer_test_class import CommonTFLayerTest
from tensorflow_tests.permutation_utils import permute_nchw_to_nhwc
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestOneHot(CommonTFLayerTest):
@staticmethod
def create_one_hot_net(shape, depth, on_value, off_value, axis, ir_version):
def create_one_hot_net(shape, depth, on_value, off_value, axis, ir_version, use_new_frontend):
"""
Tensorflow net

@ -53,13 +53,6 @@ class TestOneHot(CommonTFLayerTest):
# Create reference IR net
#

if on_value is None:
on_value = 1.0
if off_value is None:
off_value = 0.0

axis = len(shape) if axis is None else axis

ref_net = None

return tf_net, ref_net
@ -71,9 +64,9 @@ class TestOneHot(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_OneHot_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_OneHot_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_2D = [
dict(shape=[5, 6], depth=7, on_value=None, off_value=None, axis=None),
@ -90,9 +83,9 @@ class TestOneHot(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_OneHot_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_OneHot_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_3D = [
dict(shape=[5, 6, 7], depth=8, on_value=None, off_value=None, axis=None),
@ -110,9 +103,9 @@ class TestOneHot(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_OneHot_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_OneHot_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_4D = [
dict(shape=[5, 6, 7, 8], depth=9, on_value=None, off_value=None, axis=None),
@ -132,9 +125,9 @@ class TestOneHot(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_OneHot_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_OneHot_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = [
dict(shape=[4, 5, 6, 7, 8], depth=9, on_value=None, off_value=None, axis=None),
@ -154,6 +147,6 @@ class TestOneHot(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_OneHot_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_OneHot_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -8,7 +8,7 @@ from unit_tests.utils.graph import build_graph


class TestPooling(CommonTFLayerTest):
def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, method, ir_version):
def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, method, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -66,7 +66,7 @@ class TestPooling(CommonTFLayerTest):

ref_net = None

if check_ir_version(10, None, ir_version):
if check_ir_version(10, None, ir_version) and not use_new_frontend:
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': in_shape, 'kind': 'data'},
@ -156,9 +156,9 @@ class TestPooling(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_pooling_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_pooling_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = []
for method in ['max', 'avg']:
@ -229,8 +229,8 @@ class TestPooling(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_pool_5D(self, params, ie_device, precision, ir_version, temp_dir):
def test_pool_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_pooling_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(*self.create_pooling_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -9,23 +9,23 @@ from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, con

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestTFRandomUniform(CommonTFLayerTest):
def create_tf_random_uniform_net(self, global_seed, op_seed, x_shape, min_val, max_val, input_type, precision,
ir_version):
ir_version, use_new_frontend):
tf.compat.v1.reset_default_graph()

# Create the graph and model
with tf.compat.v1.Session() as sess:
tf_x_shape = x_shape.copy()
# reshaping
if len(tf_x_shape) >= 3:
tf_x_shape.append(tf_x_shape.pop(1))

x = tf.compat.v1.placeholder(input_type, x_shape, 'Input')
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

x = tf.compat.v1.placeholder(input_type, tf_x_shape, 'Input')
if global_seed is not None:
tf.random.set_seed(global_seed)
tf.compat.v1.random.set_random_seed(global_seed)
random_uniform = tf.random.uniform(x_shape, seed=op_seed, dtype=input_type, minval=min_val,
maxval=max_val) + x

@ -33,7 +33,7 @@ class TestTFRandomUniform(CommonTFLayerTest):
tf_net = sess.graph_def

ref_net = None
if check_ir_version(10, None, ir_version):
if check_ir_version(10, None, ir_version) and not use_new_frontend:

const_for_layer_tests = lambda name, value, shape, shape1: {
**{name + '_dd': {'kind': 'data', 'value': value, 'shape': shape1}},
@ -86,8 +86,9 @@ class TestTFRandomUniform(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tf_random_uniform(self, params, ie_device, precision, ir_version, temp_dir):
def test_tf_random_uniform(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
if ie_device == 'GPU':
pytest.skip("RandomUniform is not supported on GPU")
self._test(*self.create_tf_random_uniform_net(**params, precision=precision, ir_version=ir_version), ie_device,
precision, temp_dir=temp_dir, ir_version=ir_version, **params)
self._test(*self.create_tf_random_uniform_net(**params, precision=precision, ir_version=ir_version,
use_new_frontend=use_new_frontend), ie_device,
precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params)
@ -1,85 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph


class TestReLU(CommonTFLayerTest):
def create_relu_net(self, shape, ir_version):
"""
Tensorflow net IR net

Input->ReLU => Input->ReLU

"""

#
# Create Tensorflow model
#

import tensorflow as tf

tf.compat.v1.reset_default_graph()

# Create the graph and model
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) >= 3:
shapes.append(shapes.pop(1))
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')

tf.nn.relu(input, name='Operation')

tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def

#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#

ref_net = None

if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'ReLU': {'kind': 'op', 'type': 'ReLU'},
'ReLU_data': {'shape': shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
}

ref_net = build_graph(nodes_attributes,
[('input', 'input_data'),
('input_data', 'ReLU'),
('ReLU', 'ReLU_data'),
('ReLU_data', 'result')
])

return tf_net, ref_net

test_data_precommit = [dict(shape=[1, 3, 50, 100, 224])]

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_relu_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_relu_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)

test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
pytest.param(dict(shape=[1, 3, 224]), marks=pytest.mark.xfail(reason="*-19053")),
dict(shape=[1, 3, 100, 224]),
dict(shape=[1, 3, 50, 100, 224])]

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_relu(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_relu_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
@ -5,11 +5,12 @@ import pytest

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from unit_tests.utils.graph import build_graph


class TestReLU6(CommonTFLayerTest):
def create_relu6_net(self, shape, ir_version):
def create_relu6_net(self, shape, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -27,11 +28,10 @@ class TestReLU6(CommonTFLayerTest):

# Create the graph and model
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) >= 3:
shapes.append(shapes.pop(1))
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
tf_x_shape = shape.copy()

tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')

tf.nn.relu6(input, name='Operation')

@ -46,7 +46,7 @@ class TestReLU6(CommonTFLayerTest):

ref_net = None

if check_ir_version(10, None, ir_version):
if check_ir_version(10, None, ir_version) and not use_new_frontend:
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
@ -68,9 +68,9 @@ class TestReLU6(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_relu6_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_relu6_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_relu6_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_relu6_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@ -80,6 +80,6 @@ class TestReLU6(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_relu6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_relu6_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_relu6(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_relu6_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -5,6 +5,7 @@ import pytest
import numpy as np

from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestReduceOps(CommonTFLayerTest):
@ -13,7 +14,7 @@ class TestReduceOps(CommonTFLayerTest):
inputs_dict[input] = np.random.random(inputs_dict[input])
return inputs_dict

def create_reduce_net(self, shape, operation, keep_dims, axis, ir_version):
def create_reduce_net(self, shape, operation, keep_dims, axis, ir_version, use_new_frontend):
import tensorflow as tf
fn_mapping = {'sum': tf.reduce_sum,
'max': tf.reduce_max,
@ -23,11 +24,10 @@ class TestReduceOps(CommonTFLayerTest):
}
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
if len(shapes) >= 4:
shapes.append(shapes.pop(1))
tf_x_shape = shape.copy()
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

x = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
fn_mapping[operation](x, axis=axis, keepdims=keep_dims, name='Operation')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@ -46,9 +46,9 @@ class TestReduceOps(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
def test_reduce(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_pre_commit = []
for operation in ['sum', 'max', 'prod', 'min', 'mean']:
@ -58,6 +58,6 @@ class TestReduceOps(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_pre_commit)
@pytest.mark.parametrize("keep_dims", [False])
@pytest.mark.precommit
def test_reduce_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -14,7 +14,7 @@ class TestResamplePattern(CommonTFLayerTest):
inputs_dict[input] = np.random.randint(1, 256, inputs_dict[input]).astype(np.float32)
return inputs_dict

def create_resample_net(self, shape, factor):
def create_resample_net(self, shape, factor, use_new_frontend):
"""
The sub-graph in TF that could be expressed as a single Resample operation.
"""
@ -29,10 +29,10 @@ class TestResamplePattern(CommonTFLayerTest):

# Create the graph and model
with tf.compat.v1.Session() as sess:
tf_shape = shape.copy()
tf_shape = np.array(tf_shape)[[0, 2, 3, 1]]
tf_x_shape = shape.copy()
tf_x_shape = np.array(tf_x_shape)[[0, 2, 3, 1]]

input = tf.compat.v1.placeholder(tf.float32, tf_shape, 'Input')
input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')

transpose_1 = tf.transpose(a=input, perm=[1, 2, 3, 0])
expand_dims = tf.expand_dims(transpose_1, 0)
@ -50,6 +50,8 @@ class TestResamplePattern(CommonTFLayerTest):
# Moreover, do not forget to validate ALL layer attributes!!!
#

ref_net = None
if not use_new_frontend:
new_shape = shape.copy()
new_shape[2] *= factor
new_shape[3] *= factor
@ -76,6 +78,6 @@ class TestResamplePattern(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
@pytest.mark.xfail(reason="*-22273")
def test_resample(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_resample_net(params['shape'], params['factor']),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_resample(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_resample_net(params['shape'], params['factor'], use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -1,18 +1,18 @@
import pytest
import tensorflow as tf
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestTFRoll(CommonTFLayerTest):
def create_tf_roll_net(self, shift, axis, x_shape, input_type, ir_version):
def create_tf_roll_net(self, shift, axis, x_shape, input_type, ir_version, use_new_frontend):
tf.compat.v1.reset_default_graph()

# Create the graph and model
with tf.compat.v1.Session() as sess:
tf_x_shape = x_shape.copy()
# reshaping
if len(tf_x_shape) >= 3:
tf_x_shape.append(tf_x_shape.pop(1))

tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

x = tf.compat.v1.placeholder(input_type, tf_x_shape, 'Input')
roll = tf.roll(x, shift=shift, axis=axis)
@ -35,8 +35,8 @@ class TestTFRoll(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tf_roll(self, params, ie_device, precision, ir_version, temp_dir):
def test_tf_roll(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
if ie_device == 'GPU':
pytest.skip("Roll is not supported on GPU")
self._test(*self.create_tf_roll_net(**params, ir_version=ir_version), ie_device, precision,
temp_dir=temp_dir, ir_version=ir_version, **params)
self._test(*self.create_tf_roll_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision,
temp_dir=temp_dir, ir_version=ir_version, **params, use_new_frontend=use_new_frontend)
@ -5,6 +5,7 @@ import numpy as np
import pytest

from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestRsqrt(CommonTFLayerTest):
@ -13,7 +14,7 @@ class TestRsqrt(CommonTFLayerTest):
inputs_dict[input] = np.random.randint(1, 256, inputs_dict[input]).astype(np.float32)
return inputs_dict

def create_rsqrt_net(self, shape, ir_version):
def create_rsqrt_net(self, shape, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -31,11 +32,10 @@ class TestRsqrt(CommonTFLayerTest):

# Create the graph and model
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) >= 3:
shapes.append(shapes.pop(1))
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
tf_x_shape = shape.copy()

tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')

tf.math.rsqrt(input, name='Operation')

@ -56,9 +56,9 @@ class TestRsqrt(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_rsqrt_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_rsqrt_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@ -68,6 +68,6 @@ class TestRsqrt(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_rsqrt(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_rsqrt(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -4,11 +4,11 @@
import pytest

from common.tf_layer_test_class import CommonTFLayerTest
from tensorflow_tests.permutation_utils import permute_nchw_to_nhwc
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestSelect(CommonTFLayerTest):
def create_select_net(self, shape_condition, shape_input, ir_version):
def create_select_net(self, shape_condition, shape_input, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -56,9 +56,9 @@ class TestSelect(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_select_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_select_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_select_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_2D = [
dict(shape_condition=[2], shape_input=[2, 3]),
@ -67,9 +67,9 @@ class TestSelect(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_select_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_select_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_select_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_3D = [
dict(shape_condition=[3], shape_input=[3, 4, 5]),
@ -78,9 +78,9 @@ class TestSelect(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_select_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_select_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_select_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_4D = [
dict(shape_condition=[3], shape_input=[3, 4, 5, 6]),
@ -90,9 +90,9 @@ class TestSelect(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_select_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_select_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_select_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = [
dict(shape_condition=[3], shape_input=[3, 4, 5, 6, 7]),
@ -102,6 +102,6 @@ class TestSelect(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_select_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_select_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_select_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -1,90 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph


class TestSoftplus(CommonTFLayerTest):
def create_softplus_net(self, shape, ir_version):
"""
Tensorflow net IR net

Input->Softplus => Input->Softplus

"""

#
# Create Tensorflow model
#

import tensorflow as tf

tf.reset_default_graph()

# Create the graph and model
with tf.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) > 3:
shapes.append(shapes.pop(1))
input = tf.placeholder(tf.float32, shapes, 'Input')

tf.math.softplus(input, name='Operation')

tf.global_variables_initializer()
tf_net = sess.graph_def

#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#

ref_net = None

if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'Softplus': {'kind': 'op', 'type': 'SoftPlus'},
'Softplus_data': {'shape': shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
}

ref_net = build_graph(nodes_attributes,
[('input', 'input_data'),
('input_data', 'Softplus'),
('Softplus', 'Softplus_data'),
('Softplus_data', 'result')
])

return tf_net, ref_net

test_data_precommit = [
pytest.param(dict(shape=[1, 3, 100, 224]),
marks=pytest.mark.skip(reason="Skipped until fixed")),
pytest.param(dict(shape=[1, 3, 50, 100, 224]),
marks=pytest.mark.skip(reason="Skipped until fixed"))
]

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_softplus_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_softplus_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)

test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
dict(shape=[1, 3, 224]),
dict(shape=[1, 3, 100, 224]),
dict(shape=[1, 3, 50, 100, 224])]

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_softplus(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_softplus_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
@ -7,7 +7,7 @@ from common.tf_layer_test_class import CommonTFLayerTest


class TestSpaceToBatch(CommonTFLayerTest):
def create_space_to_batch_net(self, in_shape, pads_value, block_shape_value, out_shape, ir_version):
def create_space_to_batch_net(self, in_shape, pads_value, block_shape_value, out_shape, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -28,7 +28,7 @@ class TestSpaceToBatch(CommonTFLayerTest):
x = tf.compat.v1.placeholder(tf.float32, in_shape, 'Input')
pads = tf.constant(pads_value)
block_shape = tf.constant(block_shape_value)
tf.space_to_batch(x, block_shape, pads, name='Operation')
tf.space_to_batch_nd(x, block_shape, pads, name='Operation')

tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@ -61,9 +61,9 @@ class TestSpaceToBatch(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_space_to_batch_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_space_to_batch_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = [
dict(in_shape=[3, 3, 4, 5, 2], block_shape_value=[3, 4, 2], pads_value=[[1, 2], [0, 0], [3, 0]],
@ -75,6 +75,6 @@ class TestSpaceToBatch(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_space_to_batch_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_space_to_batch_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -4,10 +4,11 @@

import pytest

from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestSqueeze(CommonTFLayerTest):
def create_squeeze_net(self, shape, axis, ir_version):
def create_squeeze_net(self, shape, axis, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -25,12 +26,11 @@ class TestSqueeze(CommonTFLayerTest):

# Create the graph and model
with tf.compat.v1.Session() as sess:
x_shape = shape.copy()
# reshaping
if len(x_shape) >= 3:
x_shape.append(x_shape.pop(1))
tf_x_shape = shape.copy()

x = tf.compat.v1.placeholder(tf.float32, x_shape, 'Input')
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
squeeze = tf.squeeze(x, axis=axis, name="Operation")

tf.compat.v1.global_variables_initializer()
@ -42,25 +42,21 @@ class TestSqueeze(CommonTFLayerTest):
# Moreover, do not forget to validate ALL layer attributes!!!
#

unsigned_axis = [ax if ax > -1 else len(x_shape) + ax for ax in axis]
if not unsigned_axis:
unsigned_axis = [i for i, dim in enumerate(shape) if dim == 1]

ref_net = None

return tf_net, ref_net

test_data_1D = [
pytest.param(dict(shape=[1], axis=[]), marks=pytest.mark.xfail(reason="*-18807")),
pytest.param(dict(shape=[1], axis=[0]), marks=pytest.mark.xfail(reason="*-18859")),
pytest.param(dict(shape=[1], axis=[-1]), marks=pytest.mark.xfail(reason="*-18859"))
pytest.param(dict(shape=[1], axis=[])),
pytest.param(dict(shape=[1], axis=[0])),
pytest.param(dict(shape=[1], axis=[-1]))
]

@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_squeeze_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_squeeze_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_2D = [
pytest.param(dict(shape=[1, 1], axis=[]), marks=pytest.mark.xfail(reason="*-18807")),
@ -70,9 +66,9 @@ class TestSqueeze(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_squeeze_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_squeeze_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_3D = [
pytest.param(dict(shape=[1, 1, 3], axis=[]),
@ -84,9 +80,9 @@ class TestSqueeze(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_4D = [
pytest.param(dict(shape=[1, 1, 50, 100], axis=[]), marks=pytest.mark.xfail(reason="*-18807")),
@ -98,9 +94,9 @@ class TestSqueeze(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = [
pytest.param(dict(shape=[1, 1, 50, 100, 224], axis=[]), marks=pytest.mark.xfail(reason="*-18807")),
@ -119,6 +115,6 @@ class TestSqueeze(CommonTFLayerTest):
reason="*-19394")
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -10,7 +10,7 @@ class TestStridedSlice(CommonTFLayerTest):

@staticmethod
def create_strided_slice_net(input_shape, begin, end, strides, begin_mask, end_mask, ellipsis_mask,
new_axis_mask, shrink_axis_mask, ir_version):
new_axis_mask, shrink_axis_mask, ir_version, use_new_frontend):

#
# Create Tensorflow model
@ -60,9 +60,9 @@ class TestStridedSlice(CommonTFLayerTest):

@pytest.mark.parametrize('params', test_squeeze_data)
@pytest.mark.nightly
def test_strided_slice_replace_with_squeeze(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_strided_slice_replace_with_squeeze(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_unsqueeze_data = [
dict(input_shape=[1, 5], begin=[0, 0], end=[1, 5], strides=[1, 1], begin_mask=0,
@ -85,6 +85,6 @@ class TestStridedSlice(CommonTFLayerTest):

@pytest.mark.parametrize('params', test_unsqueeze_data)
@pytest.mark.nightly
def test_strided_slice_replace_with_unsqueeze(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_strided_slice_replace_with_unsqueeze(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
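Several of these tests (Squeeze above, Sub and Swish below, and the new unary-op suite) replace their hand-rolled `shapes.append(shapes.pop(1))` reshaping with the shared permute_nchw_to_nhwc helper imported from common.utils.tf_utils. Judging only from the code it replaces, the helper behaves roughly like the sketch below; the actual implementation in tf_utils.py may differ in details.

def permute_nchw_to_nhwc(shape, use_new_frontend=False):
    # With the new TF frontend the test shapes are fed to TensorFlow as given,
    # so no layout change is applied.
    if use_new_frontend:
        return shape
    # Legacy MO path: move the channel dimension (axis 1) to the end, turning
    # NCHW-style test data into an NHWC placeholder shape, which is what the
    # old in-place `shapes.append(shapes.pop(1))` code did.
    shape = list(shape)
    if len(shape) >= 3:
        shape.append(shape.pop(1))
    return shape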
@ -5,10 +5,11 @@ import numpy as np

import pytest

from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc


class TestSub(CommonTFLayerTest):
def create_sub_placeholder_const_net(self, x_shape, y_shape, ir_version):
def create_sub_placeholder_const_net(self, x_shape, y_shape, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -30,11 +31,9 @@ class TestSub(CommonTFLayerTest):
with tf.compat.v1.Session() as sess:
tf_x_shape = x_shape.copy()
tf_y_shape = y_shape.copy()
# reshaping
if len(tf_x_shape) >= 3:
tf_x_shape.append(tf_x_shape.pop(1))
if len(tf_y_shape) >= 3:
tf_y_shape.append(tf_y_shape.pop(1))

tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
tf_y_shape = permute_nchw_to_nhwc(tf_y_shape, use_new_frontend)

x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
constant_value = np.random.randint(-256, 256, tf_y_shape).astype(np.float32)
@ -55,23 +54,6 @@ class TestSub(CommonTFLayerTest):
# Moreover, do not forget to validate ALL layer attributes!!!
#

if len(sub_shape) >= 3:
# Permute sub_shape to (N,C,...) format
order = [0, len(sub_shape) - 1] + list(range(1, len(sub_shape) - 1))
sub_shape = [sub_shape[i] for i in order]

y_shape_to_compare = tf_y_shape.copy()
while len(y_shape_to_compare) < len(x_shape):
# Expand shape of constant with 1
y_shape_to_compare = [1] + y_shape_to_compare
constant_value = np.expand_dims(constant_value, axis=0)

if len(y_shape_to_compare) >= 3:
# Permute constant_value to (N,C,...) format for correct further reshape
order = [0, len(y_shape_to_compare) - 1] + list(range(1, len(y_shape_to_compare) - 1))
y_shape_to_compare = [y_shape_to_compare[i] for i in order]
constant_value = np.transpose(constant_value, order)

ref_net = None

return tf_net, ref_net
@ -87,10 +69,10 @@ class TestSub(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_sub_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_2D = [
# Power
@ -105,10 +87,10 @@ class TestSub(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_sub_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_3D = [
# Power
@ -125,10 +107,10 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_sub_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_4D = [
# Power
@ -144,10 +126,10 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_sub_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = [
# Power
@ -164,10 +146,10 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_sub_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

###############################################################################################
# #
@ -181,10 +163,10 @@ class TestSub(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_broadcast_1D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_2D = [
# Power
@ -201,10 +183,10 @@ class TestSub(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_broadcast_2D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_3D = [
# Power
@ -226,10 +208,10 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_3D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_4D = [
# Power
@ -258,10 +240,10 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_4D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_broadcast_5D = [
# Power
@ -288,7 +270,7 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_5D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version),
def test_sub_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir)
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
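The block removed from create_sub_placeholder_const_net above used to align the constant operand with the NCHW-permuted input before building the reference graph; with ref_net left as None that bookkeeping is no longer needed. A condensed, self-contained sketch of what that deleted alignment computed (align_const_with_nchw is a hypothetical name used here only for illustration):

import numpy as np


def align_const_with_nchw(constant_value, x_rank):
    # Prepend singleton dimensions until the constant matches the input rank,
    # mirroring NumPy/TensorFlow broadcasting.
    while constant_value.ndim < x_rank:
        constant_value = np.expand_dims(constant_value, axis=0)
    # Move the last (channel) axis next to the batch axis: (N, ..., C) -> (N, C, ...).
    if constant_value.ndim >= 3:
        order = [0, constant_value.ndim - 1] + list(range(1, constant_value.ndim - 1))
        constant_value = np.transpose(constant_value, order)
    return constant_value


# Example: a [5]-shaped constant broadcast against a rank-4 NCHW input
print(align_const_with_nchw(np.zeros([5], dtype=np.float32), 4).shape)  # (1, 5, 1, 1)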
@ -5,11 +5,12 @@ import pytest

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from unit_tests.utils.graph import build_graph


class TestSwish(CommonTFLayerTest):
def create_swish_net(self, shape, ir_version):
def create_swish_net(self, shape, ir_version, use_new_frontend):
"""
Tensorflow net IR net

@ -27,11 +28,10 @@ class TestSwish(CommonTFLayerTest):

# Create the graph and model
with tf.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) > 3:
shapes.append(shapes.pop(1))
input = tf.placeholder(tf.float32, shapes, 'Input')
tf_x_shape = shape.copy()

tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
input = tf.placeholder(tf.float32, tf_x_shape, 'Input')

tf.nn.swish(input)

@ -46,7 +46,7 @@ class TestSwish(CommonTFLayerTest):

ref_net = None

if check_ir_version(10, None, ir_version):
if check_ir_version(10, None, ir_version) and not use_new_frontend:
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
@ -71,9 +71,9 @@ class TestSwish(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_swish_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_swish_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_swish_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_swish_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@ -83,6 +83,6 @@ class TestSwish(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_swish(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_swish_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_swish(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_swish_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
@ -7,12 +7,12 @@ from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from mo.ops.op import PermuteAttrs
from unit_tests.utils.graph import build_graph
from tensorflow_tests.permutation_utils import permute_nchw_to_nhwc, permute_axis
from common.utils.tf_utils import permute_nchw_to_nhwc, permute_axis


class Test_TopK(CommonTFLayerTest):
@staticmethod
def create_topK_net(shape, k, ir_version):
def create_topK_net(shape, k, ir_version, use_new_frontend):
"""
Tensorflow net:

@ -57,7 +57,7 @@ class Test_TopK(CommonTFLayerTest):

ref_net = None

if check_ir_version(10, None, ir_version):
if check_ir_version(10, None, ir_version) and not use_new_frontend:
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
@ -94,9 +94,9 @@ class Test_TopK(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_TopK_1D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_topK_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_TopK_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_2D = [
dict(shape=[14, 15], k=10),
@ -105,9 +105,9 @@ class Test_TopK(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_TopK_2D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_topK_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_TopK_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_3D = [
dict(shape=[13, 14, 15], k=10),
@ -116,9 +116,9 @@ class Test_TopK(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_TopK_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_topK_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_TopK_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_4D = [
dict(shape=[12, 13, 14, 15], k=10),
@ -127,9 +127,9 @@ class Test_TopK(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_TopK_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_topK_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_TopK_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data_5D = [
dict(shape=[11, 12, 13, 14, 15], k=10),
@ -138,6 +138,6 @@ class Test_TopK(CommonTFLayerTest):

@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_TopK_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_topK_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_TopK_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py (new file, 202 lines)
@ -0,0 +1,202 @@

# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph
from common.utils.tf_utils import permute_nchw_to_nhwc

import numpy as np


class TestUnaryOps(CommonTFLayerTest):
current_op_type = None

def _prepare_input(self, inputs_dict):
non_negative = ['Sqrt', 'Log']
narrow_borders = ["Sinh", "Cosh", "Tanh", "Exp"]
within_one = ['Asin', 'Acos', 'Atanh']
from_one = ['Acosh']

logical_type = ['LogicalNot']

# usual function domain
lower = -256
upper = 256

# specific domains
if self.current_op_type in non_negative:
lower = 0
elif self.current_op_type in narrow_borders:
lower = -16
upper = 16
elif self.current_op_type in from_one:
lower = 1
elif self.current_op_type in within_one:
lower = -1
upper = 1

for input in inputs_dict.keys():
if self.current_op_type in logical_type:
inputs_dict[input] = np.random.randint(0, 1, inputs_dict[input]).astype(np.bool)
else:
inputs_dict[input] = np.random.uniform(lower, upper, inputs_dict[input]).astype(np.float32)

return inputs_dict
def create_net_with_unary_op(self, shape, ir_version, op_type, use_new_frontend):
"""
Tensorflow net IR net

Input->UnaryOp => Input->UnaryOp

"""
import tensorflow as tf
self.current_op_type = op_type
op_type_to_tf = {
'Abs': tf.math.abs,
'Acos': tf.math.acos,
'Acosh': tf.math.acosh,
'Asin': tf.math.asin,
'Asinh': tf.math.asinh,
'Atan': tf.math.atan,
'Atanh': tf.math.atanh,
'Ceiling': tf.math.ceil,
'Cos': tf.math.cos,
'Cosh': tf.math.cosh,
'Elu': tf.nn.elu,
'Exp': tf.math.exp,
'Floor': tf.math.floor,
'Log': tf.math.log,
'LogicalNot': tf.math.logical_not,
'Negative': tf.math.negative,
'Sigmoid': tf.nn.sigmoid,
'Sign': tf.math.sign,
'Sin': tf.math.sin,
'Sinh': tf.math.sinh,
'SoftPlus': tf.nn.softplus,
'Tan': tf.math.tan,
'Tanh': tf.math.tanh,
'ReLU': tf.nn.relu,
}

#
# Create Tensorflow model
#

tf.compat.v1.reset_default_graph()

type = tf.float32
if op_type == "LogicalNot":
type = tf.bool
# Create the graph and model
with tf.compat.v1.Session() as sess:
tf_x_shape = shape.copy()
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

input = tf.compat.v1.placeholder(type, tf_x_shape, 'Input')
op_type_to_tf[self.current_op_type](input, name='Operation')

tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def

#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#

ref_net = None

if check_ir_version(10, None, ir_version) and not use_new_frontend:
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'testing_op': {'kind': 'op', 'type': self.current_op_type},
'testing_data': {'shape': shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
}

ref_net = build_graph(nodes_attributes,
[('input', 'input_data'),
('input_data', 'testing_op'),
('testing_op', 'testing_data'),
('testing_data', 'result')
])

return tf_net, ref_net
test_data_precommit = [dict(shape=[4, 6, 8, 10, 12])]

@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.parametrize("op_type", ['Elu',
'Sigmoid',
'Sin',
'Sinh',
'Cos',
'Cosh',
'Abs',
'Negative',
'Exp',
'Tan',
'Tanh',
'Floor',
'ReLU',
'Ceiling',
'Asin',
'Acos',
'Atan',
'Log',
'Sign',
'SoftPlus',
'Atanh',
'Acosh',
'Asinh',
'LogicalNot',
])
@pytest.mark.precommit
def test_unary_op_precommit(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_net_with_unary_op(**params, ir_version=ir_version, op_type=op_type,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)

test_data = [dict(shape=[10, 12]),
dict(shape=[8, 10, 12]),
dict(shape=[6, 8, 10, 12]),
dict(shape=[4, 6, 8, 10, 12])]

@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("op_type", ['Elu',
'Sigmoid',
'Sin',
'Sinh',
'Cos',
'Cosh',
'Abs',
'Negative',
'Exp',
'Tan',
'Tanh',
'Floor',
'ReLU',
'Ceiling',
'Asin',
'Acos',
'Atan',
'Log',
'LogicalNot',
'Sign',
'SoftPlus',
'Atanh',
'Acosh',
'Asinh'])
@pytest.mark.nightly
def test_unary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_net_with_unary_op(**params, ir_version=ir_version, op_type=op_type,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
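The per-op input domains in _prepare_input are what keep operations such as Asin, Acosh or Log numerically well-defined during comparison against the framework results. A small standalone illustration of that selection logic follows; sample_input is a hypothetical helper written only for this sketch and is not part of the test suite.

import numpy as np


def sample_input(op_type, shape):
    # Default range, matching the "usual function domain" in _prepare_input.
    lower, upper = -256, 256
    if op_type in ('Sqrt', 'Log'):                     # non-negative arguments only
        lower = 0
    elif op_type in ('Sinh', 'Cosh', 'Tanh', 'Exp'):   # narrow range to avoid overflow
        lower, upper = -16, 16
    elif op_type in ('Acosh',):                        # arguments from 1 upwards
        lower = 1
    elif op_type in ('Asin', 'Acos', 'Atanh'):         # arguments within [-1, 1]
        lower, upper = -1, 1
    return np.random.uniform(lower, upper, shape).astype(np.float32)


print(sample_input('Log', [2, 3]).min() >= 0)             # True
print(np.abs(sample_input('Asin', [2, 3])).max() <= 1.0)  # True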