diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index c0a99d0fbb7..462a0b9f7cc 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -743,7 +743,7 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::shared_ptr< // if auto-batching is applicable, the below function will patch the device name and config accordingly: auto model = apply_auto_batching(model_, deviceName, config_with_batch); - auto parsed = parseDeviceNameIntoConfig(deviceName, config_with_batch); + auto parsed = parseDeviceNameIntoConfig(deviceName, config_with_batch, is_proxy_device(deviceName)); auto plugin = get_plugin(parsed._deviceName); ov::SoPtr<ov::ICompiledModel> res; auto cacheManager = coreConfig.get_cache_config_for_device(plugin, parsed._config)._cacheManager; @@ -776,7 +776,7 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::shared_ptr< // if auto-batching is applicable, the below function will patch the device name and config accordingly: auto model = apply_auto_batching(model_, deviceName, config_with_batch); - auto parsed = parseDeviceNameIntoConfig(deviceName, config_with_batch); + auto parsed = parseDeviceNameIntoConfig(deviceName, config_with_batch, is_proxy_device(deviceName)); auto plugin = get_plugin(parsed._deviceName); ov::SoPtr<ov::ICompiledModel> res; auto cacheManager = coreConfig.get_cache_config_for_device(plugin, parsed._config)._cacheManager; @@ -1095,8 +1095,9 @@ std::shared_ptr<const ov::Model> ov::CoreImpl::apply_auto_batching(const std::sh const auto disabled = batch_mode->second.as<std::string>() == CONFIG_VALUE(NO); // virtual plugins like AUTO/MULTI will need the config // e.g.
to deduce the #requests correctly + // proxy plugin should also keep the config // otherwise, no need for this config key in the rest of loading - if (!is_virtual_device(deviceName)) + if (!is_virtual_device(deviceName) && !is_proxy_device(deviceName)) config.erase(batch_mode); if (disabled) return model; diff --git a/src/plugins/proxy/tests/batch_compliance_test.cpp b/src/plugins/proxy/tests/batch_compliance_test.cpp new file mode 100644 index 00000000000..73cf894291f --- /dev/null +++ b/src/plugins/proxy/tests/batch_compliance_test.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/proxy/properties.hpp" +#include "openvino/runtime/properties.hpp" +#include "proxy_tests.hpp" + +using namespace ov::proxy::tests; + +TEST_F(ProxyTests, can_parse_and_inherit_batch_property) { + register_plugin_support_reshape(core, "MOCK_DEVICE", {{ov::proxy::configuration::alias.name(), "ALIAS_MOCK"}}); + auto available_devices = core.get_available_devices(); + auto model = create_model_with_add(); + auto compiled_model_default = core.compile_model(model, "MOCK_DEVICE", ov::hint::performance_mode("THROUGHPUT")); +#ifdef ENABLE_AUTO_BATCH + EXPECT_NO_THROW(compiled_model_default.get_property(ov::auto_batch_timeout)); // batch enabled by default + EXPECT_EQ(compiled_model_default.get_property(ov::auto_batch_timeout), 1000); // default value +#endif + auto compiled_model_with_batch = core.compile_model(model, + "MOCK_DEVICE", + ov::hint::performance_mode("THROUGHPUT"), + ov::hint::allow_auto_batching(true), + ov::auto_batch_timeout(8)); +#ifdef ENABLE_AUTO_BATCH + EXPECT_NO_THROW(compiled_model_with_batch.get_property(ov::auto_batch_timeout)); + EXPECT_EQ(compiled_model_with_batch.get_property(ov::auto_batch_timeout), 8); +#endif + auto compiled_model_no_batch = core.compile_model(model, + "MOCK_DEVICE", + ov::hint::performance_mode("THROUGHPUT"), + ov::hint::allow_auto_batching(false)); +
EXPECT_ANY_THROW(compiled_model_no_batch.get_property(ov::auto_batch_timeout)); +} \ No newline at end of file