Add global config for test infra (#17547)

* [IE TESTS] Add Global test config for Subgraph base test

* Replace option usage with function redefinition

* fix build

* remove extra changes for gna/template

* code style

* add nvidia to devices

* Fix debian

* remove nvidia
Irina Efode 2023-06-24 01:07:36 +04:00 committed by GitHub
parent eb43f40ca1
commit 31b07c40d9
8 changed files with 79 additions and 41 deletions
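
Taken together, the diffs below replace the device-specific branches that used to live in SubgraphBaseTest::compile_model() with a single core_configuration(SubgraphBaseTest*) hook: declared once in the shared test classes, defined separately in each plugin's core_config.cpp, and resolved at link time. A minimal self-contained sketch of the pattern (simplified types; the std::map configuration member and the device key below are placeholders, not the real OpenVINO declarations):

#include <map>
#include <string>

namespace ov {
namespace test {

class SubgraphBaseTest;

// Declared once in the shared header; every plugin test binary must supply
// exactly one definition at link time.
void core_configuration(SubgraphBaseTest* test);

class SubgraphBaseTest {
protected:
    std::map<std::string, std::string> configuration;  // simplified; the real member holds ov::Any values

    void compile_model() {
        core_configuration(this);  // plugin-specific defaults are injected here
        // ... compile the model on the target device with `configuration` ...
    }

    friend void core_configuration(SubgraphBaseTest* test);  // grants access to protected state
};

}  // namespace test
}  // namespace ov

// In one plugin's core_config.cpp: the device-specific definition.
void ov::test::core_configuration(ov::test::SubgraphBaseTest* test) {
    test->configuration.insert({"SOME_DEVICE_KEY", "DEFAULT_VALUE"});  // hypothetical key
}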

View File

@@ -3,6 +3,7 @@
//
#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
// Within the test scope we don't need any implicit bf16 optimisations, so let's run the network as is.
@@ -11,3 +12,17 @@ void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO});
}
}
namespace ov {
namespace test {
void core_configuration(ov::test::SubgraphBaseTest* test) {
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64)
if (!test->configuration.count(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16)) {
test->configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO});
}
#endif
}
} // namespace test
} // namespace ov
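
Note the count() guard above: the CPU hook only installs a default, so a test that sets the key itself wins. A hypothetical test that wants bf16 left enabled could pre-seed the key before compile_model() runs:

// e.g. in the test's SetUp(); core_configuration() will then leave it alone.
configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16,
                      InferenceEngine::PluginConfigParams::YES});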

View File

@@ -8,6 +8,7 @@
#include <string>
#include "functional_test_utils/blob_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
const float MAX_VAL_2B_FEAT = 16384.0f;
@@ -57,3 +58,11 @@ void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
configuration[scaleFactorConfigKey] = std::to_string(floatScaleFactor);
}
}
namespace ov {
namespace test {
void core_configuration(ov::test::SubgraphBaseTest* test) {}
} // namespace test
} // namespace ov

View File

@@ -3,6 +3,7 @@
//
#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::shared_ptr<InferenceEngine::Core> core = PluginCache::get().ie();
@@ -18,3 +19,23 @@ void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::map<std::string, std::string> config = {{"INFERENCE_PRECISION_HINT", hint.get_type_name()}};
core->SetConfig(config, CommonTestUtils::DEVICE_GPU);
}
namespace ov {
namespace test {
void core_configuration(ov::test::SubgraphBaseTest* test) {
ov::element::Type hint = ov::element::f32;
for (auto& param : test->function->get_parameters()) {
if (param->get_output_element_type(0) == ov::element::f16) {
hint = ov::element::f16;
break;
}
}
// Set inference_precision hint to run fp32 model in fp32 runtime precision as default plugin execution precision
// may vary
test->core->set_property(CommonTestUtils::DEVICE_GPU, {{ov::hint::inference_precision.name(), hint.get_type_name()}});
}
} // namespace test
} // namespace ov
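
The GPU hook pins the inference precision to the model's own precision, f16 if any parameter is f16 and f32 otherwise, so accuracy comparison against the reference is not skewed by whatever execution precision the plugin would pick by default. For reference, the equivalent standalone call with the OpenVINO 2.0 property API (assuming an ov::Core instance named core and an f16 model):

core.set_property("GPU", ov::hint::inference_precision(ov::element::f16));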

View File

@@ -3,6 +3,15 @@
//
#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
}
namespace ov {
namespace test {
void core_configuration(ov::test::SubgraphBaseTest* test) {}
} // namespace test
} // namespace ov

View File

@@ -4,17 +4,17 @@
#include "functional_test_utils/core_config.hpp"
#include "common_test_utils/file_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "openvino/util/file_util.hpp"
#include "conformance.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::shared_ptr<InferenceEngine::Core> core = PluginCache::get().ie();
auto availableDevices = core->GetAvailableDevices();
std::string targetDevice = std::string(ov::test::conformance::targetDevice);
if (std::find(availableDevices.begin(), availableDevices.end(), targetDevice) == availableDevices.end()) {
core->RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
std::string(ov::test::conformance::targetPluginName) + IE_BUILD_POSTFIX),
ov::test::conformance::targetDevice);
}
}
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
namespace ov {
namespace test {
void core_configuration(ov::test::SubgraphBaseTest* test) {}
} // namespace test
} // namespace ov

View File

@@ -4,17 +4,17 @@
#include "functional_test_utils/core_config.hpp"
#include "common_test_utils/file_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "openvino/util/file_util.hpp"
#include "conformance.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::shared_ptr<InferenceEngine::Core> core = PluginCache::get().ie();
auto availableDevices = core->GetAvailableDevices();
std::string targetDevice = std::string(ov::test::conformance::targetDevice);
if (std::find(availableDevices.begin(), availableDevices.end(), targetDevice) == availableDevices.end()) {
core->RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
std::string(ov::test::conformance::targetPluginName) + IE_BUILD_POSTFIX),
ov::test::conformance::targetDevice);
}
}
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
namespace ov {
namespace test {
void core_configuration(ov::test::SubgraphBaseTest* test) {}
} // namespace test
} // namespace ov

View File

@@ -69,6 +69,8 @@ protected:
virtual std::vector<ov::Tensor> calculate_refs();
virtual std::vector<ov::Tensor> get_plugin_outputs();
friend void core_configuration(SubgraphBaseTest* test);
};
inline std::vector<InputShape> static_partial_shapes_to_test_representation(const std::vector<ov::PartialShape>& shapes) {
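
The friend declaration is what lets the free function touch protected members such as configuration, core, and function without widening the public API. Inside a plugin's definition, a line like the following (mirroring the old inline GPU logic) is legal only because of it:

// OK only because core_configuration is a friend of SubgraphBaseTest:
test->configuration.insert({ov::hint::inference_precision.name(), ov::element::f32});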

View File

@@ -23,8 +23,9 @@
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/crash_handler.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "functional_test_utils/crash_handler.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
@@ -210,26 +211,7 @@ void SubgraphBaseTest::compile_model() {
if (functionRefs == nullptr) {
functionRefs = function->clone();
}
// Within the test scope we don't need any implicit bf16 optimisations, so let's run the network as is.
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64)
if (targetDevice == CommonTestUtils::DEVICE_CPU && !configuration.count(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16)) {
configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO});
}
#endif
// Set inference_precision hint to run fp32 model in fp32 runtime precision as default plugin execution precision may vary
if (targetDevice == CommonTestUtils::DEVICE_GPU) {
ov::element::Type hint = ov::element::f32;
for (auto& param : function->get_parameters()) {
if (param->get_output_element_type(0) == ov::element::f16) {
hint = ov::element::f16;
break;
}
}
configuration.insert({ov::hint::inference_precision.name(), hint});
}
core_configuration(this);
compiledModel = core->compile_model(function, targetDevice, configuration);
if (is_report_stages) {
auto end_time = std::chrono::system_clock::now();
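
Net effect of this last hunk: compile_model() no longer branches on targetDevice. The CPU bf16 guard and the GPU precision-hint scan shown above moved into the per-plugin hooks, and the shared base simply calls core_configuration(this) before compiling.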