diff --git a/tests/time_tests/include/timetests_helper/utils.h b/tests/time_tests/include/timetests_helper/utils.h
index 841721ac534..86c9addaab6 100644
--- a/tests/time_tests/include/timetests_helper/utils.h
+++ b/tests/time_tests/include/timetests_helper/utils.h
@@ -4,6 +4,10 @@
 
 #pragma once
 
+#include <algorithm>
+#include <inference_engine.hpp>
+#include <iostream>
+
 #include <string>
 
 namespace TimeTest {
@@ -12,9 +16,24 @@ namespace TimeTest {
  * @param filename - name of the file which extension should be extracted
  * @return string with extracted file extension
  */
-std::string fileExt(const std::string& filename) {
+std::string fileExt(const std::string &filename) {
     auto pos = filename.rfind('.');
     if (pos == std::string::npos) return "";
     return filename.substr(pos + 1);
 }
+
+/**
+ * @brief Enables the 'LATENCY' performance hint for the specified device, if supported.
+ */
+void setPerformanceConfig(InferenceEngine::Core &ie, const std::string &device) {
+    std::vector<std::string> supported_config_keys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+
+    if (std::find(supported_config_keys.begin(), supported_config_keys.end(), "PERFORMANCE_HINT") ==
+        supported_config_keys.end()) {
+        std::cerr << "Device " << device << " doesn't support config key 'PERFORMANCE_HINT'!\n"
+                  << "Performance config was not set.\n";
+    } else {
+        ie.SetConfig({{CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(LATENCY)}}, device);
+    }
+}
 }
\ No newline at end of file
diff --git a/tests/time_tests/scripts/run_timetest.py b/tests/time_tests/scripts/run_timetest.py
index 7e2b140ef7c..1f60c7d6289 100644
--- a/tests/time_tests/scripts/run_timetest.py
+++ b/tests/time_tests/scripts/run_timetest.py
@@ -62,25 +62,12 @@ def prepare_executable_cmd(args: dict):
         str(args["executable"].resolve(strict=True)),
         "-m", str(args["model"].resolve(strict=True)),
         "-d", args["device"],
-        "-p", args["perf_hint"],
+        "-p" if args["perf_hint"] else "",
         "-v" if args["vpu_compiler"] else "",
         args['vpu_compiler'] if args["vpu_compiler"] else "",
         "-c" if args["cpu_cache"] else "",
     ]
 
 
-def get_cache_stats(flatten_data):
-    """Update statistics for run with models cache"""
-    data_cache = {
-        "full_run_using_cache": flatten_data["full_run"],
-        "time_to_inference_using_cache": flatten_data["time_to_inference"],
-        "load_plugin": flatten_data["load_plugin"],
-        "load_network_using_cache": flatten_data["load_network"],
-        "first_inference": flatten_data["first_inference"],
-        "fill_inputs": flatten_data["fill_inputs"],
-    }
-    return data_cache
-
-
 def run_timetest(args: dict, log=None):
     """Run provided executable several times and aggregate collected statistics"""
     if log is None:
@@ -108,9 +95,6 @@ def run_timetest(args: dict, log=None):
         flatten_data = {}
         parse_stats(raw_data[0], flatten_data)
 
-        if run_iter > 0 and args["cpu_cache"]:
-            flatten_data = get_cache_stats(flatten_data)
-
         log.debug(f"Statistics after run of executable #{run_iter}: {flatten_data}")
 
         # Combine statistics from several runs
@@ -154,10 +138,8 @@ def cli_parser():
                         help="path to a file to save aggregated statistics")
     parser.add_argument("-p",
                         dest="perf_hint",
-                        choices=["LATENCY", "THROUGHPUT"],
-                        default="LATENCY",
-                        type=str,
-                        help="Enables performance hint for specified device. Default hint is LATENCY")
+                        action="store_true",
+                        help="Enables 'LATENCY' performance hint for specified device.")
     exclusive_group = parser.add_mutually_exclusive_group(required=False)
     exclusive_group.add_argument("-c",
                                  dest="cpu_cache",
diff --git a/tests/time_tests/src/timetests/timetest_infer.cpp b/tests/time_tests/src/timetests/timetest_infer.cpp
index 1017c5daf84..5612c5dd0f2 100644
--- a/tests/time_tests/src/timetests/timetest_infer.cpp
+++ b/tests/time_tests/src/timetests/timetest_infer.cpp
@@ -3,7 +3,6 @@
 //
 
 #include <inference_engine.hpp>
-#include <iostream>
 
 #include "common_utils.h"
 
@@ -17,9 +16,9 @@ using namespace InferenceEngine;
  * main(). The function should not throw any exceptions and responsible for
  * handling it by itself.
  */
-int runPipeline(const std::string &model, const std::string &device, const std::string &performanceHint,
+int runPipeline(const std::string &model, const std::string &device, const bool performanceHint,
                 const bool isCacheEnabled, const std::string &vpuCompiler) {
-    auto pipeline = [](const std::string &model, const std::string &device, const std::string &performanceHint,
+    auto pipeline = [](const std::string &model, const std::string &device, const bool performanceHint,
                        const bool isCacheEnabled, const std::string &vpuCompiler) {
         Core ie;
         CNNNetwork cnnNetwork;
@@ -27,25 +26,6 @@ int runPipeline(const std::string &model, const std::string &device, const std::
         InferRequest inferRequest;
         size_t batchSize = 0;
 
-        if (!performanceHint.empty()) {
-            std::vector<std::string> supported_config_keys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
-
-            // enables performance hint for specified device
-            std::string performanceConfig;
-            if (performanceHint == "THROUGHPUT")
-                performanceConfig = CONFIG_VALUE(THROUGHPUT);
-            else if (performanceHint == "LATENCY")
-                performanceConfig = CONFIG_VALUE(LATENCY);
-
-            if (std::find(supported_config_keys.begin(), supported_config_keys.end(), "PERFORMANCE_HINT") ==
-                supported_config_keys.end()) {
-                std::cerr << "Device " << device << " doesn't support config key 'PERFORMANCE_HINT'!\n"
-                          << "Performance config was not set.";
-            }
-            else
-                ie.SetConfig({{CONFIG_KEY(PERFORMANCE_HINT), performanceConfig}}, device);
-        }
-
         // set config for VPUX device
         std::map<std::string, std::string> vpuConfig = {};
         if (vpuCompiler == "MCM")
@@ -58,6 +38,9 @@
         SCOPED_TIMER(time_to_inference);
         {
             SCOPED_TIMER(load_plugin);
+            if (performanceHint) {
+                TimeTest::setPerformanceConfig(ie, device);
+            }
             ie.GetVersions(device);
 
             if (isCacheEnabled)
diff --git a/tests/time_tests/src/timetests_helper/cli.h b/tests/time_tests/src/timetests_helper/cli.h
index 6ad8fc78084..3ebf2a8f924 100644
--- a/tests/time_tests/src/timetests_helper/cli.h
+++ b/tests/time_tests/src/timetests_helper/cli.h
@@ -28,7 +28,7 @@ static const char target_device_message[] =
 
 /// @brief message for vpu argument
 static const char performance_hint_message[] =
-    "Not required. Enables performance hint for specified device. Available hints are LATENCY and THROUGHPUT.";
+    "Not required. Enables performance hint 'LATENCY' for specified device.";
 
 /// @brief message for cache argument
 static const char cpu_cache_message[] =
@@ -36,7 +36,7 @@ static const char cpu_cache_message[] =
 
 /// @brief message for vpu argument
 static const char vpu_compiler_message[] =
-    "Not required. Use this key to run timetests using MLIR or MCM VPUX compiler type.";
+    "Not required. Use this key to run timetests with 'MLIR' or 'MCM' VPUX compiler type.";
 
 /// @brief message for statistics path argument
 static const char statistics_path_message[] =
@@ -58,7 +58,7 @@ DEFINE_string(d, "", target_device_message);
 
 /// @brief Define parameter for set performance hint for target device
 /// It is a non-required parameter
-DEFINE_string(p, "", performance_hint_message);
+DEFINE_bool(p, false, performance_hint_message);
 
 /// @brief Define parameter for set CPU models caching
 /// It is a non-required parameter
@@ -86,7 +86,7 @@ static void showUsage() {
               << std::endl;
     std::cout << "    -s \"<path>\"                " << statistics_path_message << std::endl;
-    std::cout << "    -p \"<hint>\"                " << performance_hint_message << std::endl;
+    std::cout << "    -p                          " << performance_hint_message << std::endl;
     std::cout << "    -c                          " << cpu_cache_message << std::endl;
     std::cout << "    -v \"<compiler_type>\"       " << vpu_compiler_message << std::endl;
 }
diff --git a/tests/time_tests/src/timetests_helper/main.cpp b/tests/time_tests/src/timetests_helper/main.cpp
index 083027db24f..193e5339711 100644
--- a/tests/time_tests/src/timetests_helper/main.cpp
+++ b/tests/time_tests/src/timetests_helper/main.cpp
@@ -8,7 +8,7 @@
 
 #include <string>
 
-int runPipeline(const std::string &model, const std::string &device, const std::string &performanceHint,
+int runPipeline(const std::string &model, const std::string &device, const bool performanceHint,
                 const bool isCacheEnabled, const std::string &vpuCompiler);
 
 /**
diff --git a/tests/time_tests/test_runner/conftest.py b/tests/time_tests/test_runner/conftest.py
index 4ccbe4401b8..647e603857f 100644
--- a/tests/time_tests/test_runner/conftest.py
+++ b/tests/time_tests/test_runner/conftest.py
@@ -66,10 +66,8 @@ def pytest_addoption(parser):
     )
     test_args_parser.addoption(
         "--perf_hint",
-        choices=['LATENCY', 'THROUGHPUT'],
-        default='LATENCY',
-        type=str,
-        help='Enables performance hint for specified device. Default hint is LATENCY'
+        action='store_true',
+        help="Enables 'LATENCY' performance hint for specified device."
    )
     test_args_parser.addoption(
         "--vpu_compiler",
@@ -274,6 +272,10 @@ def prepare_db_info(request, test_info, executable, niter, manifest_metadata):
     with open(db_meta_path, "r") as db_meta_f:
         test_info["db_info"].update(json.load(db_meta_f))
 
+    # add cpu cache status
+    cpu_cache = bool(request.config.getoption("cpu_cache"))
+    test_info["db_info"].update({"use_cpu_cache": cpu_cache})
+
     # add test info
     info = {
         # results will be added immediately before uploading to DB in `pytest_runtest_makereport`
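
For reference, a minimal sketch of how the relocated helper is driven after this change, assuming the tests/time_tests/include directory is on the include path; the standalone main() wrapper and the "CPU" device name are placeholders for illustration, not part of the patch:

    #include <inference_engine.hpp>

    #include "timetests_helper/utils.h"

    int main() {
        InferenceEngine::Core ie;
        // Sets PERFORMANCE_HINT=LATENCY if the device lists the key in
        // SUPPORTED_CONFIG_KEYS; otherwise it warns on stderr and leaves
        // the device configuration untouched.
        TimeTest::setPerformanceConfig(ie, "CPU");
        return 0;
    }

On the command line the hint is now a bare switch rather than a value: prepare_executable_cmd emits "-p" (with no argument) when perf_hint is truthy, and the pytest runner exposes the matching boolean --perf_hint option.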