added ov InferRequest behaviour tests (#7811)
This commit is contained in:
Parent: 479081ff32
Commit: c39e6fcfd8
@@ -0,0 +1,21 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/callback.hpp"

using namespace ov::test::behavior;

namespace {
const std::vector<std::map<std::string, std::string>> configs = {
    {}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
            ::testing::ValuesIn(configs)),
        OVInferRequestCallbackTests::getTestCaseName);
} // namespace
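Note for readers unfamiliar with the parameterized-test machinery used throughout this patch: ::testing::Combine builds the cartesian product of its generators, so each INSTANTIATE_TEST_SUITE_P above yields one instance of every TEST_P in the fixture per (device, config) pair, each named via getTestCaseName. A minimal, self-contained sketch of the same gtest pattern (the fixture and names here are hypothetical, not part of the patch):

    #include <gtest/gtest.h>
    #include <string>
    #include <tuple>

    // Hypothetical fixture: each TEST_P runs once per (device, config) pair
    // produced by ::testing::Combine below.
    class ComboTest : public ::testing::TestWithParam<std::tuple<std::string, int>> {};

    TEST_P(ComboTest, RunsPerCombination) {
        std::string device;
        int config;
        std::tie(device, config) = GetParam();
        SUCCEED() << device << "/" << config;
    }

    // Two devices x two configs -> four generated test instances,
    // named by the trailing name-generator callable.
    INSTANTIATE_TEST_SUITE_P(Demo, ComboTest,
            ::testing::Combine(
                ::testing::Values(std::string("DEV_A"), std::string("DEV_B")),
                ::testing::Values(0, 1)),
            [](const ::testing::TestParamInfo<ComboTest::ParamType>& info) {
                return std::get<0>(info.param) + "_" + std::to_string(std::get<1>(info.param));
            });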
@@ -0,0 +1,19 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "behavior/ov_infer_request/cancellation.hpp"

using namespace ov::test::behavior;

namespace {
const std::vector<std::map<std::string, std::string>> configs = {
    {},
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCancellationTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
            ::testing::ValuesIn(configs)),
        OVInferRequestCancellationTests::getTestCaseName);
} // namespace
@@ -4,9 +4,9 @@

 #include <vector>

-#include "behavior/infer_request/infer_request_dynamic.hpp"
+#include "behavior/ov_infer_request/infer_request_dynamic.hpp"

-using namespace BehaviorTestsDefinitions;
+using namespace ov::test::behavior;

 namespace {

@@ -17,22 +17,22 @@ const std::vector<std::map<std::string, std::string>> configs = {
 const std::vector<std::map<std::string, std::string>> HeteroConfigs = {
     {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}};

-INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestDynamicTests,
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestDynamicTests,
         ::testing::Combine(
             ::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
             ::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{{{1, 4, 20, 20}, {1, 10, 18, 18}},
                                                                                                {{2, 4, 20, 20}, {2, 10, 18, 18}}}),
             ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
             ::testing::ValuesIn(configs)),
-        InferRequestDynamicTests::getTestCaseName);
+        OVInferRequestDynamicTests::getTestCaseName);

-INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestDynamicTests,
+INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests,
         ::testing::Combine(
             ::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
             ::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{{{1, 4, 20, 20}, {1, 10, 18, 18}},
                                                                                                {{2, 4, 20, 20}, {2, 10, 18, 18}}}),
             ::testing::Values(CommonTestUtils::DEVICE_HETERO),
             ::testing::ValuesIn(HeteroConfigs)),
-        InferRequestDynamicTests::getTestCaseName);
+        OVInferRequestDynamicTests::getTestCaseName);

 } // namespace
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "behavior/infer_request/inference_chaining.hpp"
+#include "behavior/ov_infer_request/inference_chaining.hpp"
 #include "common_test_utils/test_constants.hpp"

 using namespace ov::test::behavior;
@@ -0,0 +1,22 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/io_tensor.hpp"

using namespace ov::test::behavior;

namespace {
const std::vector<std::map<std::string, std::string>> configs = {
    {}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
            ::testing::ValuesIn(configs)),
        OVInferRequestIOTensorTest::getTestCaseName);

} // namespace
@@ -0,0 +1,23 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/multithreading.hpp"

using namespace ov::test::behavior;

namespace {

const std::vector<std::map<std::string, std::string>> configs = {
    {}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
            ::testing::ValuesIn(configs)),
        OVInferRequestMultithreadingTests::getTestCaseName);

} // namespace
@@ -0,0 +1,22 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/wait.hpp"

using namespace ov::test::behavior;

namespace {
const std::vector<std::map<std::string, std::string>> configs = {
    {}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
            ::testing::ValuesIn(configs)),
        OVInferRequestWaitTests::getTestCaseName);

} // namespace
@@ -0,0 +1,22 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "openvino/core/except.hpp"
#include "openvino/runtime/common.hpp"

namespace ov {
namespace runtime {
/// Thrown in case of cancelled asynchronous operation
class OPENVINO_RUNTIME_API Cancelled : public Exception {
    using Exception::Exception;
};

/// Thrown in case of busy infer request
class OPENVINO_RUNTIME_API Busy : public Exception {
    using Exception::Exception;
};
}  // namespace runtime
}  // namespace ov
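These two exception types are what OpenVINO 2.0 callers are expected to catch instead of Inference Engine status codes. A hedged usage sketch based on the mapping introduced later in this patch (InferenceEngine::RequestBusy -> ov::runtime::Busy, ie::InferCancelled -> ov::runtime::Cancelled); the helper function is illustrative only, not part of the patch:

    #include "openvino/runtime/exception.hpp"
    #include "openvino/runtime/infer_request.hpp"

    // Illustrative helper: exercise both exception paths on an
    // already-created ov::runtime::InferRequest.
    void demo_exception_handling(ov::runtime::InferRequest& req) {
        req.start_async();
        try {
            req.infer();  // synchronous call while async work may still be in flight
        } catch (const ov::runtime::Busy&) {
            // the request was still busy with the previous start_async()
        }
        req.cancel();
        try {
            req.wait();
        } catch (const ov::runtime::Cancelled&) {
            // the pending request was cancelled before it completed
        }
    }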
@@ -13,7 +13,7 @@
 #include "ie_infer_async_request_base.hpp"
 #include "ie_ngraph_utils.hpp"
 #include "ie_remote_context.hpp"
-#include "openvino/core/except.hpp"
+#include "openvino/runtime/exception.hpp"
 #include "openvino/runtime/infer_request.hpp"
 #include "transformations/utils/utils.hpp"

@@ -50,6 +50,8 @@ namespace InferenceEngine {
     OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); \
     try {                                                                   \
         __VA_ARGS__;                                                        \
+    } catch (const ::InferenceEngine::RequestBusy& ex) {                    \
+        throw ov::runtime::Busy(ex.what());                                 \
     } catch (const std::exception& ex) {                                    \
         throw ov::Exception(ex.what());                                     \
     } catch (...) {                                                         \
@@ -405,11 +407,29 @@ void InferRequest::start_async() {
 }

 void InferRequest::wait() {
-    OV_INFER_REQ_CALL_STATEMENT(_impl->Wait(ie::InferRequest::RESULT_READY);)
+    OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized.");
+    try {
+        _impl->Wait(ie::InferRequest::RESULT_READY);
+    } catch (const ie::InferCancelled& e) {
+        throw Cancelled{e.what()};
+    } catch (const std::exception& ex) {
+        throw Exception(ex.what());
+    } catch (...) {
+        OPENVINO_UNREACHABLE("Unexpected exception");
+    }
 }

 bool InferRequest::wait_for(const std::chrono::milliseconds timeout) {
-    OV_INFER_REQ_CALL_STATEMENT(return _impl->Wait(timeout.count()) == ie::OK;)
+    OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized.");
+    try {
+        return _impl->Wait(timeout.count()) == ie::OK;
+    } catch (const ie::InferCancelled& e) {
+        throw Cancelled{e.what()};
+    } catch (const std::exception& ex) {
+        throw Exception(ex.what());
+    } catch (...) {
+        OPENVINO_UNREACHABLE("Unexpected exception");
+    }
 }

 void InferRequest::set_callback(std::function<void(std::exception_ptr)> callback) {
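With wait_for() returning a bool (true once the result is ready) and wait() translating ie::InferCancelled into ov::runtime::Cancelled, a typical polling loop looks like the sketch below; this mirrors the cancellation tests added later in this patch (the helper function is illustrative only):

    #include <chrono>
    #include <future>

    #include "openvino/runtime/infer_request.hpp"

    void poll_until_done(ov::runtime::InferRequest& req) {
        // run the synchronous inference on a worker thread
        auto job = std::async(std::launch::async, [&req] { req.infer(); });
        while (!req.wait_for(std::chrono::milliseconds{10})) {
            // result not ready yet; back off or do other work
        }
        job.get();  // rethrows from the worker, e.g. ov::runtime::Cancelled after cancel()
    }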
@@ -0,0 +1,39 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/callback.hpp"

using namespace ov::test::behavior;

namespace {
const std::vector<std::map<std::string, std::string>> configs = {
    {},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};

const std::vector<std::map<std::string, std::string>> multiConfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::ValuesIn(configs)),
        OVInferRequestCallbackTests::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_MULTI),
            ::testing::ValuesIn(multiConfigs)),
        OVInferRequestCallbackTests::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_AUTO),
            ::testing::ValuesIn(multiConfigs)),
        OVInferRequestCallbackTests::getTestCaseName);
} // namespace
@@ -0,0 +1,19 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "behavior/ov_infer_request/cancellation.hpp"

using namespace ov::test::behavior;

namespace {
const std::vector<std::map<std::string, std::string>> configs = {
    {},
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCancellationTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::ValuesIn(configs)),
        OVInferRequestCancellationTests::getTestCaseName);
} // namespace
@@ -4,16 +4,12 @@

 #include <vector>

-#include "behavior/infer_request/infer_request_dynamic.hpp"
+#include "behavior/ov_infer_request/infer_request_dynamic.hpp"

-using namespace BehaviorTestsDefinitions;
+using namespace ov::test::behavior;

 namespace {

-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32
-};
-
 const std::vector<std::map<std::string, std::string>> configs = {
     {}
 };
@@ -64,7 +60,7 @@ std::shared_ptr<ngraph::Function> getFunction2() {
     return std::make_shared<ngraph::Function>(concat, params, "SplitAddConcat");
 }

-INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, InferRequestDynamicTests,
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, OVInferRequestDynamicTests,
         ::testing::Combine(
             ::testing::Values(getFunction1()),
             ::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
@@ -72,9 +68,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, InferRequestDynamicTests,
                 {{2, 4, 20, 20}, {2, 4, 20, 20}}}),
             ::testing::Values(CommonTestUtils::DEVICE_CPU),
             ::testing::ValuesIn(configs)),
-        InferRequestDynamicTests::getTestCaseName);
+        OVInferRequestDynamicTests::getTestCaseName);

-INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, InferRequestDynamicTests,
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, OVInferRequestDynamicTests,
         ::testing::Combine(
             ::testing::Values(getFunction2()),
             ::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
@@ -82,9 +78,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, InferRequestDynamicTests,
                 {{2, 4, 20, 20}, {2, 2, 20, 40}}}),
             ::testing::Values(CommonTestUtils::DEVICE_CPU),
             ::testing::ValuesIn(configs)),
-        InferRequestDynamicTests::getTestCaseName);
+        OVInferRequestDynamicTests::getTestCaseName);

-INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestDynamicTests,
+INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests,
         ::testing::Combine(
             ::testing::Values(getFunction2()),
             ::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
@@ -92,6 +88,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestDynamicTests,
                 {{2, 4, 20, 20}, {2, 2, 20, 40}}}),
             ::testing::Values(CommonTestUtils::DEVICE_HETERO),
             ::testing::ValuesIn(HeteroConfigs)),
-        InferRequestDynamicTests::getTestCaseName);
+        OVInferRequestDynamicTests::getTestCaseName);

 } // namespace
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "behavior/infer_request/inference_chaining.hpp"
+#include "behavior/ov_infer_request/inference_chaining.hpp"
 #include "common_test_utils/test_constants.hpp"

 using namespace ov::test::behavior;
@@ -0,0 +1,84 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/io_tensor.hpp"

using namespace ov::test::behavior;

namespace {
const std::vector<std::map<std::string, std::string>> configs = {
    {},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};

const std::vector<std::map<std::string, std::string>> Multiconfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

const std::vector<std::map<std::string, std::string>> Autoconfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::ValuesIn(configs)),
        OVInferRequestIOTensorTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_MULTI),
            ::testing::ValuesIn(Multiconfigs)),
        OVInferRequestIOTensorTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_AUTO),
            ::testing::ValuesIn(Autoconfigs)),
        OVInferRequestIOTensorTest::getTestCaseName);

std::vector<ov::element::Type> prcs = {
    ov::element::boolean,
    ov::element::bf16,
    ov::element::f16,
    ov::element::f32,
    ov::element::f64,
    ov::element::i4,
    ov::element::i8,
    ov::element::i16,
    ov::element::i32,
    ov::element::i64,
    ov::element::u1,
    ov::element::u4,
    ov::element::u8,
    ov::element::u16,
    ov::element::u32,
    ov::element::u64,
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest,
        ::testing::Combine(
            ::testing::ValuesIn(prcs),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::ValuesIn(configs)),
        OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest,
        ::testing::Combine(
            ::testing::ValuesIn(prcs),
            ::testing::Values(CommonTestUtils::DEVICE_MULTI),
            ::testing::ValuesIn(Multiconfigs)),
        OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest,
        ::testing::Combine(
            ::testing::ValuesIn(prcs),
            ::testing::Values(CommonTestUtils::DEVICE_AUTO),
            ::testing::ValuesIn(Autoconfigs)),
        OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);

} // namespace
@@ -0,0 +1,41 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/multithreading.hpp"

using namespace ov::test::behavior;

namespace {

const std::vector<std::map<std::string, std::string>> configs = {
    {},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};

const std::vector<std::map<std::string, std::string>> Multiconfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::ValuesIn(configs)),
        OVInferRequestMultithreadingTests::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_MULTI),
            ::testing::ValuesIn(Multiconfigs)),
        OVInferRequestMultithreadingTests::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_AUTO),
            ::testing::ValuesIn(Multiconfigs)),
        OVInferRequestMultithreadingTests::getTestCaseName);

} // namespace
@@ -0,0 +1,59 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "behavior/ov_infer_request/perf_counters.hpp"

using namespace ov::test::behavior;

namespace {
TEST_P(OVInferRequestPerfCountersTest, CheckOperationInProfilingInfo) {
    req = execNet.create_infer_request();
    ASSERT_NO_THROW(req.infer());

    std::vector<ov::runtime::ProfilingInfo> profiling_info;
    ASSERT_NO_THROW(profiling_info = req.get_profiling_info());

    for (const auto& op : function->get_ops()) {
        auto op_is_in_profiling_info = std::any_of(std::begin(profiling_info), std::end(profiling_info),
            [&] (const ov::runtime::ProfilingInfo& info) {
                if (info.node_name.find(op->get_friendly_name() + "_") != std::string::npos || info.node_name == op->get_friendly_name()) {
                    return true;
                } else {
                    return false;
                }
            });
        ASSERT_TRUE(op_is_in_profiling_info) << "For op: " << op;
    }
}

const std::vector<std::map<std::string, std::string>> configs = {
    {}
};

const std::vector<std::map<std::string, std::string>> Multiconfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

const std::vector<std::map<std::string, std::string>> Autoconfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::ValuesIn(configs)),
        OVInferRequestPerfCountersTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestPerfCountersTest,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_MULTI),
            ::testing::ValuesIn(Multiconfigs)),
        OVInferRequestPerfCountersTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestPerfCountersTest,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_AUTO),
            ::testing::ValuesIn(Autoconfigs)),
        OVInferRequestPerfCountersTest::getTestCaseName);
} // namespace
@@ -0,0 +1,45 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/ov_infer_request/wait.hpp"

using namespace ov::test::behavior;

namespace {

const std::vector<std::map<std::string, std::string>> configs = {
    {},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
    {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};

const std::vector<std::map<std::string, std::string>> Multiconfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

const std::vector<std::map<std::string, std::string>> Autoconfigs = {
    {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::ValuesIn(configs)),
        OVInferRequestWaitTests::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestWaitTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_MULTI),
            ::testing::ValuesIn(Multiconfigs)),
        OVInferRequestWaitTests::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestWaitTests,
        ::testing::Combine(
            ::testing::Values(CommonTestUtils::DEVICE_AUTO),
            ::testing::ValuesIn(Autoconfigs)),
        OVInferRequestWaitTests::getTestCaseName);

} // namespace
@@ -49,6 +49,7 @@ public:
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         std::tie(targetDevice, configuration) = this->GetParam();
+        function = ngraph::builder::subgraph::makeConvPoolRelu();
         execNet = core->compile_model(function, targetDevice, configuration);
     }

     void TearDown() override {
@@ -59,7 +60,7 @@ public:

 protected:
     ov::runtime::ExecutableNetwork execNet;
-    std::shared_ptr<ov::runtime::Core> core = utils::PluginCache::get().core();;
+    std::shared_ptr<ov::runtime::Core> core = utils::PluginCache::get().core();
     std::string targetDevice;
     std::map<std::string, std::string> configuration;
     std::shared_ptr<ov::Function> function;

@@ -1,202 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <chrono>
#include <initializer_list>
#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "base/behavior_test_utils.hpp"
#include "openvino/core/attribute_visitor.hpp"
#include "openvino/core/function.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/rank.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/infer_request.hpp"
#include "openvino/runtime/tensor.hpp"

namespace ov {
namespace test {
namespace behavior {

class OVInferenceChaining : public OVInferRequestTests {
protected:
    static std::shared_ptr<ov::Function> getFirstStaticFunction(const ov::PartialShape &shape = {3}) {
        auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape});
        params[0]->get_output_tensor(0).set_names({"input_tensor_0"});
        params[0]->set_friendly_name("param_0");
        params[1]->get_output_tensor(0).set_names({"input_tensor_1"});
        params[1]->set_friendly_name("param_1");
        params[2]->get_output_tensor(0).set_names({"input_tensor_2"});
        params[2]->set_friendly_name("param_2");
        auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD);
        auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD);
        eltwise2->get_output_tensor(0).set_names({"result_tensor_0"});
        eltwise2->set_friendly_name("result_0");

        return std::make_shared<ov::Function>(eltwise2, ov::ParameterVector(params));
    }

    static std::shared_ptr<ov::Function> getSecondStaticFunction(const ov::PartialShape &shape = {3}) {
        auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape});
        params[0]->get_output_tensor(0).set_names({"input_tensor_0"});
        params[0]->set_friendly_name("param_0");
        params[1]->get_output_tensor(0).set_names({"input_tensor_1"});
        params[1]->set_friendly_name("param_1");
        auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::MULTIPLY);
        eltwise->get_output_tensor(0).set_names({"result_tensor_0"});
        eltwise->set_friendly_name("result_0");

        return std::make_shared<ov::Function>(eltwise, ov::ParameterVector(params));
    }

    static std::shared_ptr<ov::Function> getThirdStaticFunction(const ov::PartialShape &shape = {3}) {
        auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape, shape});
        params[0]->get_output_tensor(0).set_names({"input_tensor_0"});
        params[0]->set_friendly_name("param_0");
        params[1]->get_output_tensor(0).set_names({"input_tensor_1"});
        params[1]->set_friendly_name("param_1");
        params[2]->get_output_tensor(0).set_names({"input_tensor_2"});
        params[2]->set_friendly_name("param_2");
        params[3]->get_output_tensor(0).set_names({"input_tensor_3"});
        params[3]->set_friendly_name("param_3");
        auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD);
        auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD);
        auto eltwise3 = ngraph::builder::makeEltwise(eltwise2, params[3], ngraph::helpers::EltwiseTypes::MULTIPLY);
        eltwise3->get_output_tensor(0).set_names({"result_tensor_0"});
        eltwise3->set_friendly_name("result_0");

        return std::make_shared<ov::Function>(eltwise3, ov::ParameterVector(params));
    }

    template<typename T>
    ov::runtime::Tensor tensor(const std::vector<T> &v) {
        auto type = ov::element::from<T>();
        ov::runtime::Tensor tensor(type, {v.size()});
        std::memcpy(tensor.data(), v.data(), v.size() * type.size());

        return tensor;
    }

    std::shared_ptr<ov::Function> function0;
    std::shared_ptr<ov::Function> function1;
    std::shared_ptr<ov::Function> function2;

    bool outputToInput = true;

public:
    void Run() {
        ov::runtime::ExecutableNetwork execNet0, execNet1, execNet2;
        ASSERT_NO_THROW(execNet0 = core->compile_model(function0, targetDevice, configuration));
        ASSERT_NO_THROW(execNet1 = core->compile_model(function1, targetDevice, configuration));
        ASSERT_NO_THROW(execNet2 = core->compile_model(function2, targetDevice, configuration));

        ov::runtime::InferRequest r0, r1, r2;
        ASSERT_NO_THROW(r0 = execNet0.create_infer_request());
        ASSERT_NO_THROW(r1 = execNet1.create_infer_request());
        ASSERT_NO_THROW(r2 = execNet2.create_infer_request());

        // perform inference chaining
        if (outputToInput) {
            ASSERT_NO_THROW(r1.set_tensor("input_tensor_0", r0.get_tensor("result_tensor_0")));
        } else {
            ASSERT_NO_THROW(r0.set_tensor("result_tensor_0", r1.get_tensor("input_tensor_0")));
        }

        // create input tensors
        ov::runtime::Tensor t0 = tensor(std::vector<float>{1.0f, 2.0f, 3.0f});
        ov::runtime::Tensor t1 = tensor(std::vector<float>{4.0f, 5.0f, 6.0f});
        ov::runtime::Tensor t2 = tensor(std::vector<float>{7.0f, 8.0f, 9.0f});
        ov::runtime::Tensor t3 = tensor(std::vector<float>{2.0f, 3.0f, 2.0f});

        ASSERT_NO_THROW(r0.set_tensor("input_tensor_0", t0));
        ASSERT_NO_THROW(r0.set_tensor("input_tensor_1", t1));
        ASSERT_NO_THROW(r0.set_tensor("input_tensor_2", t2));
        ASSERT_NO_THROW(r1.set_tensor("input_tensor_1", t3));

        ASSERT_NO_THROW(r2.set_tensor("input_tensor_0", t0));
        ASSERT_NO_THROW(r2.set_tensor("input_tensor_1", t1));
        ASSERT_NO_THROW(r2.set_tensor("input_tensor_2", t2));
        ASSERT_NO_THROW(r2.set_tensor("input_tensor_3", t3));

        ASSERT_NO_THROW(r0.infer());
        ASSERT_NO_THROW(r1.infer());
        ASSERT_NO_THROW(r2.infer());

        // check results
        std::vector<float> reference1 = {12.0f, 15.0f, 18.0f};
        std::vector<float> reference2 = {24.0f, 45.0f, 36.0f};

        auto rti = r0.get_tensor("result_tensor_0");
        auto rt0 = r1.get_tensor("result_tensor_0");
        auto rt1 = r2.get_tensor("result_tensor_0");

        for (size_t i = 0; i < reference1.size(); ++i) {
            EXPECT_EQ(reference1[i], rti.data<float>()[i]);
            EXPECT_EQ(reference2[i], rt0.data<float>()[i]);
            EXPECT_EQ(reference2[i], rt1.data<float>()[i]);
        }
    }
};

TEST_P(OVInferenceChaining, StaticOutputToStaticInput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    function0 = getFirstStaticFunction();
    function1 = getSecondStaticFunction();
    function2 = getThirdStaticFunction();

    Run();
}

TEST_P(OVInferenceChaining, StaticOutputToDynamicInput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    const auto dynamic = ov::PartialShape::dynamic(ov::Rank(1));
    function0 = getFirstStaticFunction();
    function1 = getSecondStaticFunction(dynamic);
    function2 = getThirdStaticFunction(dynamic);

    Run();
}

TEST_P(OVInferenceChaining, DynamicOutputToDynamicInput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    const auto dynamic = ov::PartialShape::dynamic();
    function0 = getFirstStaticFunction(dynamic);
    function1 = getSecondStaticFunction(dynamic);
    function2 = getThirdStaticFunction(dynamic);

    Run();
}

TEST_P(OVInferenceChaining, DynamicInputToDynamicOutput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    this->outputToInput = false;

    const auto dynamic = ov::PartialShape::dynamic();
    function0 = getFirstStaticFunction(dynamic);
    function1 = getSecondStaticFunction(dynamic);
    function2 = getThirdStaticFunction(dynamic);

    Run();
}

}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,17 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "base/ov_behavior_test_utils.hpp"

namespace ov {
namespace test {
namespace behavior {
struct OVInferRequestCallbackTests : public OVInferRequestTests {
    static std::string getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj);
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,19 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <future>

#include "base/ov_behavior_test_utils.hpp"

namespace ov {
namespace test {
namespace behavior {
struct OVInferRequestCancellationTests : public OVInferRequestTests {
    static std::string getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj);
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,62 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <chrono>
#include <future>
#include <gtest/gtest.h>
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "ie_extension.h"
#include <condition_variable>
#include "openvino/core/shape.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include "transformations/utils/utils.hpp"
#include <string>
#include <ie_core.hpp>
#include <thread>
#include <base/behavior_test_utils.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "shared_test_classes/subgraph/basic_lstm.hpp"

// TODO [mandrono]: move current test case inside CPU plug-in and return the original tests
namespace ov {
namespace test {
namespace behavior {

using OVInferRequestDynamicParams = std::tuple<
        std::shared_ptr<Function>,                                         // ov function
        std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>,  // input/expected output shapes per inference
        std::string,                                                       // Device name
        std::map<std::string, std::string>                                 // Config
>;

class OVInferRequestDynamicTests : public testing::WithParamInterface<OVInferRequestDynamicParams>,
                                   public CommonTestUtils::TestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<OVInferRequestDynamicParams> obj);

protected:
    void SetUp() override;

    void TearDown() override;

    std::shared_ptr<runtime::Core> ie = utils::PluginCache::get().core();
    std::shared_ptr<Function> function;
    std::string targetDevice;
    std::map<std::string, std::string> configuration;
    std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> inOutShapes;
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,61 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <chrono>
#include <initializer_list>
#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "base/behavior_test_utils.hpp"
#include "openvino/core/attribute_visitor.hpp"
#include "openvino/core/function.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/rank.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/infer_request.hpp"
#include "openvino/runtime/tensor.hpp"

namespace ov {
namespace test {
namespace behavior {

class OVInferenceChaining : public OVInferRequestTests {
protected:
    static std::shared_ptr<ov::Function> getFirstStaticFunction(const ov::PartialShape &shape = {3});

    static std::shared_ptr<ov::Function> getSecondStaticFunction(const ov::PartialShape &shape = {3});

    static std::shared_ptr<ov::Function> getThirdStaticFunction(const ov::PartialShape &shape = {3});

    template<typename T>
    ov::runtime::Tensor tensor(const std::vector<T> &v) {
        auto type = ov::element::from<T>();
        ov::runtime::Tensor tensor(type, {v.size()});
        std::memcpy(tensor.data(), v.data(), v.size() * type.size());

        return tensor;
    }

    std::shared_ptr<ov::Function> function0;
    std::shared_ptr<ov::Function> function1;
    std::shared_ptr<ov::Function> function2;

    bool outputToInput = true;

public:
    static std::string getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj);

    void Run();
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,46 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <thread>
#include <future>

#include "shared_test_classes/subgraph/basic_lstm.hpp"
#include "base/ov_behavior_test_utils.hpp"

namespace ov {
namespace test {
namespace behavior {

struct OVInferRequestIOTensorTest : public OVInferRequestTests {
    static std::string getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj);
    void SetUp() override;
    void TearDown() override;
    runtime::InferRequest req;
    ov::Output<const ov::Node> input;
    ov::Output<const ov::Node> output;
};

using OVInferRequestSetPrecisionParams = std::tuple<
        element::Type,                      // element type
        std::string,                        // Device name
        std::map<std::string, std::string>  // Config
>;
struct OVInferRequestIOTensorSetPrecisionTest : public testing::WithParamInterface<OVInferRequestSetPrecisionParams>,
                                                public CommonTestUtils::TestsCommon {
    static std::string getTestCaseName(const testing::TestParamInfo<OVInferRequestSetPrecisionParams>& obj);
    void SetUp() override;
    void TearDown() override;
    std::shared_ptr<ov::runtime::Core> core = utils::PluginCache::get().core();
    std::shared_ptr<ov::Function> function;
    runtime::ExecutableNetwork execNet;
    runtime::InferRequest req;
    std::string target_device;
    runtime::ConfigMap config;
    element::Type element_type;
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,17 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "base/ov_behavior_test_utils.hpp"

namespace ov {
namespace test {
namespace behavior {
struct OVInferRequestMultithreadingTests : public OVInferRequestTests {
    static std::string getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj);
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,19 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "base/ov_behavior_test_utils.hpp"

namespace ov {
namespace test {
namespace behavior {
struct OVInferRequestPerfCountersTest : public OVInferRequestTests {
    static std::string getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj);
    void SetUp() override;
    runtime::InferRequest req;
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,22 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "base/ov_behavior_test_utils.hpp"

namespace ov {
namespace test {
namespace behavior {
struct OVInferRequestWaitTests : public OVInferRequestTests {
    static std::string getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj);
    void SetUp() override;
    void TearDown() override;
    runtime::InferRequest req;
    ov::Output<const ov::Node> input;
    ov::Output<const ov::Node> output;
};
}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,132 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <future>

#include "shared_test_classes/subgraph/basic_lstm.hpp"
#include "behavior/ov_infer_request/callback.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferRequestCallbackTests::getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj) {
    return OVInferRequestTests::getTestCaseName(obj);
}

TEST_P(OVInferRequestCallbackTests, canCallAsyncWithCompletionCallback) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    bool is_called = false;
    OV_ASSERT_NO_THROW(req.set_callback([&] (std::exception_ptr exception_ptr) {
        // HSD_1805940120: Wait on starting callback returns HDDL_ERROR_INVAL_TASK_HANDLE
        ASSERT_EQ(exception_ptr, nullptr);
        is_called = true;
    }));
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    ASSERT_TRUE(is_called);
}

TEST_P(OVInferRequestCallbackTests, syncInferDoesNotCallCompletionCallback) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    bool is_called = false;
    req.set_callback([&] (std::exception_ptr exception_ptr) {
        ASSERT_EQ(nullptr, exception_ptr);
        is_called = true;
    });
    req.infer();
    ASSERT_FALSE(is_called);
}

// Test that all pending callbacks can be waited for in the dtor
TEST_P(OVInferRequestCallbackTests, canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor) {
    const int NUM_ITER = 10;
    struct TestUserData {
        std::atomic<int> numIter = {0};
        std::promise<bool> promise;
    };
    TestUserData data;

    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.set_callback([&] (std::exception_ptr exception_ptr) {
        if (exception_ptr) {
            data.promise.set_exception(exception_ptr);
        } else {
            if (data.numIter.fetch_add(1) != NUM_ITER) {
                req.start_async();
            } else {
                data.promise.set_value(true);
            }
        }
    }));
    auto future = data.promise.get_future();
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    future.wait();
    auto callbackStatus = future.get();
    ASSERT_TRUE(callbackStatus);
    auto dataNumIter = data.numIter - 1;
    ASSERT_EQ(NUM_ITER, dataNumIter);
}

TEST_P(OVInferRequestCallbackTests, returnGeneralErrorIfCallbackThrowException) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.set_callback([] (std::exception_ptr) {
        OPENVINO_UNREACHABLE("Throw");
    }));
    OV_ASSERT_NO_THROW(req.start_async());
    ASSERT_THROW(req.wait(), ov::Exception);
}

TEST_P(OVInferRequestCallbackTests, ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout) {
    // GetNetwork(3000, 380) makes inference take around 20ms on GNA SW,
    // which increases the chances of getting RESULT_NOT_READY
    OV_ASSERT_NO_THROW(execNet = core->compile_model(
        SubgraphTestsDefinitions::Basic_LSTM_S::GetNetwork(300, 38), targetDevice, configuration));
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    std::promise<std::chrono::system_clock::time_point> callbackTimeStamp;
    auto callbackTimeStampFuture = callbackTimeStamp.get_future();
    // add a callback to the request and capture the timestamp
    OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
        if (exception_ptr) {
            callbackTimeStamp.set_exception(exception_ptr);
        } else {
            callbackTimeStamp.set_value(std::chrono::system_clock::now());
        }
    }));
    OV_ASSERT_NO_THROW(req.start_async());
    bool ready = false;
    OV_ASSERT_NO_THROW(ready = req.wait_for({}));
    // get a timestamp taken AFTER the return from wait(STATUS_ONLY)
    const auto afterWaitTimeStamp = std::chrono::system_clock::now();
    // if the callback timestamp is later than afterWaitTimeStamp,
    // then the observed ready result must have been false
    if (afterWaitTimeStamp < callbackTimeStampFuture.get()) {
        ASSERT_FALSE(ready);
    }
    OV_ASSERT_NO_THROW(req.wait());
}

TEST_P(OVInferRequestCallbackTests, ImplDoesNotCopyCallback) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    {
        auto somePtr = std::make_shared<int>(42);
        OV_ASSERT_NO_THROW(req.set_callback([somePtr] (std::exception_ptr exception_ptr) {
            ASSERT_EQ(nullptr, exception_ptr);
            ASSERT_EQ(1, somePtr.use_count());
        }));
    }
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
}

}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -0,0 +1,66 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <future>

#include "behavior/ov_infer_request/cancellation.hpp"
#include "openvino/runtime/exception.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferRequestCancellationTests::getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj) {
    return OVInferRequestTests::getTestCaseName(obj);
}

TEST_P(OVInferRequestCancellationTests, canCancelAsyncRequest) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.cancel());
    try {
        req.wait();
    } catch (const ov::runtime::Cancelled&) {
        SUCCEED();
    }
}

TEST_P(OVInferRequestCancellationTests, CanResetAfterCancelAsyncRequest) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.cancel());
    try {
        req.wait();
    } catch (const ov::runtime::Cancelled&) {
        SUCCEED();
    }
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
}

TEST_P(OVInferRequestCancellationTests, canCancelBeforeAsyncRequest) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.cancel());
}

TEST_P(OVInferRequestCancellationTests, canCancelInferRequest) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    auto infer = std::async(std::launch::async, [&req]{ req.infer(); });
    while (!req.wait_for({})) {
    }
    OV_ASSERT_NO_THROW(req.cancel());
    try {
        infer.get();
    } catch (const ov::runtime::Cancelled&) {
        SUCCEED();
    }
}

}  // namespace behavior
}  // namespace test
}  // namespace ov
@@ -2,8 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#pragma once
-
 #include <chrono>
 #include <future>
 #include <gtest/gtest.h>
@@ -14,7 +12,6 @@
 #include "functional_test_utils/ov_plugin_cache.hpp"
 #include "ie_extension.h"
 #include <condition_variable>
-#include "openvino/core/partial_shape.hpp"
 #include "openvino/core/shape.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
 #include "ngraph_functions/utils/ngraph_helpers.hpp"
@ -29,213 +26,198 @@
|
||||
#include "functional_test_utils/blob_utils.hpp"
|
||||
#include "ngraph_functions/subgraph_builders.hpp"
|
||||
#include "shared_test_classes/subgraph/basic_lstm.hpp"
|
||||
#include "behavior/ov_infer_request/infer_request_dynamic.hpp"
|
||||
|
||||
// TODO [mandrono]: move current test case inside CPU plug-in and return the original tests
|
||||
namespace BehaviorTestsDefinitions {
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace behavior {
|
||||
|
||||
typedef std::tuple<
|
||||
std::shared_ptr<ov::Function>, // ov function
|
||||
std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>, // input/expected output shapes per inference
|
||||
std::string, // Device name
|
||||
std::map<std::string, std::string> // Config
|
||||
> InferRequestDynamicParams;
|
||||
|
||||
class InferRequestDynamicTests : public testing::WithParamInterface<InferRequestDynamicParams>,
|
||||
public CommonTestUtils::TestsCommon {
|
||||
public:
|
||||
static std::string getTestCaseName(testing::TestParamInfo<InferRequestDynamicParams> obj) {
|
||||
std::shared_ptr<ov::Function> func;
|
||||
std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> inOutShapes;
|
||||
std::string targetDevice;
|
||||
std::map<std::string, std::string> configuration;
|
||||
std::tie(func, inOutShapes, targetDevice, configuration) = obj.param;
|
||||
std::ostringstream result;
|
||||
result << "function=" << func->get_friendly_name() << "_";
|
||||
result << "inOutShape=(";
|
||||
for (const auto& inOutShape : inOutShapes) {
|
||||
result << "(" << CommonTestUtils::vec2str(inOutShape.first) << "_" << CommonTestUtils::vec2str(inOutShape.second) << ")";
|
||||
}
|
||||
result << ")_";
|
||||
result << "targetDevice=" << targetDevice;
|
||||
if (!configuration.empty()) {
|
||||
for (auto& configItem : configuration) {
|
||||
result << "configItem=" << configItem.first << "_" << configItem.second << "_";
|
||||
}
|
||||
}
|
||||
return result.str();
|
||||
}
|
||||
|
||||
protected:
|
||||
void SetUp() override {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
std::tie(function, inOutShapes, targetDevice, configuration) = this->GetParam();
|
||||
}
|
||||
|
||||
void TearDown() override {
|
||||
if (!configuration.empty()) {
|
||||
ov::test::utils::PluginCache::get().reset();
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<ov::runtime::Core> ie = ov::test::utils::PluginCache::get().core();
|
||||
std::shared_ptr<ov::Function> function;
|
||||
std::string OVInferRequestDynamicTests::getTestCaseName(testing::TestParamInfo<OVInferRequestDynamicParams> obj) {
|
||||
std::shared_ptr<Function> func;
|
||||
std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> inOutShapes;
|
||||
std::string targetDevice;
|
||||
std::map<std::string, std::string> configuration;
|
||||
std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> inOutShapes;
|
||||
};
|
||||
std::tie(func, inOutShapes, targetDevice, configuration) = obj.param;
|
||||
std::ostringstream result;
|
||||
result << "function=" << func->get_friendly_name() << "_";
|
||||
result << "inOutShape=(";
|
||||
for (const auto& inOutShape : inOutShapes) {
|
||||
result << "(" << CommonTestUtils::vec2str(inOutShape.first) << "_" << CommonTestUtils::vec2str(inOutShape.second) << ")";
|
||||
}
|
||||
result << ")_";
|
||||
result << "targetDevice=" << targetDevice;
|
||||
if (!configuration.empty()) {
|
||||
for (auto& configItem : configuration) {
|
||||
result << "configItem=" << configItem.first << "_" << configItem.second << "_";
|
||||
}
|
||||
}
|
||||
return result.str();
|
||||
}
|
||||
|
||||
TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) {
|
||||
void OVInferRequestDynamicTests::SetUp() {
|
||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||
std::tie(function, inOutShapes, targetDevice, configuration) = this->GetParam();
|
||||
}
|
||||
|
||||
void OVInferRequestDynamicTests::TearDown() {
|
||||
if (!configuration.empty()) {
|
||||
PluginCache::get().reset();
|
||||
}
|
||||
function.reset();
|
||||
}
|
||||
|
||||
TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) {
|
||||
const std::string tensor_name = "input_tensor";
|
||||
std::map<std::string, ov::PartialShape> shapes;
|
||||
shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
|
||||
ASSERT_NO_THROW(function->reshape(shapes));
|
||||
OV_ASSERT_NO_THROW(function->reshape(shapes));
|
||||
// Load ov::Function to target plugins
|
||||
auto execNet = ie->compile_model(function, targetDevice, configuration);
|
||||
// Create InferRequest
|
||||
ov::runtime::InferRequest req;
|
||||
ov::runtime::Tensor tensor;
|
||||
ASSERT_NO_THROW(req = execNet.create_infer_request());
|
||||
ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
|
||||
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
|
||||
OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
|
||||
}
|
||||
|
||||
TEST_P(InferRequestDynamicTests, InferDynamicNetworkBoundWithoutSetShape) {
|
||||
TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkBoundWithoutSetShape) {
|
||||
const std::string tensor_name = "input_tensor";
|
||||
std::map<std::string, ov::PartialShape> shapes;
|
||||
shapes[tensor_name] = {ov::Dimension(0, 5), 4, 20, 20};
|
||||
ASSERT_NO_THROW(function->reshape(shapes));
|
||||
OV_ASSERT_NO_THROW(function->reshape(shapes));
|
||||
// Load ov::Function to target plugins
|
||||
auto execNet = ie->compile_model(function, targetDevice, configuration);
|
||||
// Create InferRequest
|
||||
ov::runtime::InferRequest req;
|
||||
ov::runtime::Tensor tensor;
|
||||
ASSERT_NO_THROW(req = execNet.create_infer_request());
|
||||
ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
|
||||
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
|
||||
OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
|
||||
}
|
||||
|
||||
|
||||
TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor) {
TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refOutShape = inOutShapes[0].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor, otensor;
    const std::string outputname = function->outputs().back().get_any_name();
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20}));
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    //OV_ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20}));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
    ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated
    ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // but it already has a type
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    EXPECT_NE(0, otensor.get_size()); // output tensor is allocated after infer
    ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
    ASSERT_EQ(otensor.get_shape(), refOutShape);
}

TEST_P(InferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) {
TEST_P(OVInferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refOutShape = inOutShapes[0].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension(0, 19), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor, otensor;
    const std::string outputname = function->outputs().back().get_any_name();
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20}));
    ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    //OV_ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20}));
    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
    ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated
    ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // but it already has a type
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    ASSERT_EQ(otensor.get_shape(), refOutShape);
}

TEST_P(InferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) {
TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithGetTensor) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refOutShape = inOutShapes[0].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = ov::PartialShape::dynamic();
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor, otensor;
    const std::string outputName = function->outputs().back().get_any_name();
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20}));
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    //OV_ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20}));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
    ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated
    ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // but it already has a type
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
    ASSERT_EQ(otensor.get_shape(), refOutShape);
}

TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorLower) {
TEST_P(OVInferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorLower) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refOutShape = inOutShapes[0].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension(2, 3), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor;
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20}));
    // The plugin may or may not throw if the input tensor dimensions are out of bounds
    //ASSERT_THROW(req.infer(), ov::Exception);
}

TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorUpper) {
TEST_P(OVInferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorUpper) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refOutShape = inOutShapes[0].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension(1, 2), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor;
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape({3, 4, 20, 20}));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape({3, 4, 20, 20}));
    // The plugin may or may not throw if the input tensor dimensions are out of bounds
    // ASSERT_THROW(req.infer(), ov::Exception);
}

TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) {
TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refShape2 = inOutShapes[1].first;
@ -243,106 +225,106 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) {
    const ov::Shape refOutShape2 = inOutShapes[1].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor;
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape(refShape));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape(refShape));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    req.wait();
    const std::string outputName = function->outputs().back().get_any_name();
    ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    ASSERT_EQ(tensor.get_shape(), refOutShape);

    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape(refShape2));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape(refShape2));
    ASSERT_EQ(tensor.get_shape(), refShape2);
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    req.wait();
    ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    ASSERT_EQ(tensor.get_shape(), refOutShape2);
}

TEST_P(InferRequestDynamicTests, GetSameTensor2times) {
TEST_P(OVInferRequestDynamicTests, GetSameTensor2times) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor;
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_NO_THROW(tensor.set_shape(refShape));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor.set_shape(refShape));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
    ASSERT_EQ(tensor.get_shape(), refShape);
}

TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor) {
TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refOutShape = inOutShapes[0].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor(ov::element::f32, refShape);
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    const std::string outputName = function->outputs().back().get_any_name();
    ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    ASSERT_EQ(tensor.get_shape(), refOutShape);
}

TEST_P(InferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) {
TEST_P(OVInferRequestDynamicTests, InferFullyDynamicNetworkWithSetTensor) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refOutShape = inOutShapes[0].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = ov::PartialShape::dynamic();
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
    // Create InferRequest
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor(ov::element::f32, refShape), otensor;
    const std::string outputName = function->outputs().back().get_any_name();
    ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputName));
    ASSERT_EQ(0, otensor.get_size()); // output tensor is not allocated
    ASSERT_EQ(function->output().get_element_type(), otensor.get_element_type()); // but it already has a type
    ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.infer());
    ASSERT_EQ(otensor.get_shape(), refOutShape);
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    ASSERT_EQ(otensor.get_shape(), refOutShape);
    ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    ASSERT_EQ(tensor.get_shape(), refOutShape);
    ASSERT_EQ(otensor.get_shape(), refOutShape);
}

TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
    const std::string tensor_name = "input_tensor";
    const ov::Shape refShape = inOutShapes[0].first;
    const ov::Shape refShape2 = inOutShapes[1].first;
@ -350,7 +332,7 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
    const ov::Shape refOutShape2 = inOutShapes[1].second;
    std::map<std::string, ov::PartialShape> shapes;
    shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
    ASSERT_NO_THROW(function->reshape(shapes));
    OV_ASSERT_NO_THROW(function->reshape(shapes));
    const std::string outputName = function->outputs().back().get_any_name();
    // Load ov::Function to target plugins
    auto execNet = ie->compile_model(function, targetDevice, configuration);
@ -358,23 +340,25 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
    ov::runtime::InferRequest req;
    ov::runtime::Tensor tensor(ov::element::f32, refShape);

    ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    ASSERT_EQ(tensor.get_shape(), refShape);
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    ASSERT_EQ(tensor.get_shape(), refOutShape);

    tensor = ov::runtime::Tensor(ov::element::f32, refShape2);
    ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    OV_ASSERT_NO_THROW(req.set_tensor(function->inputs().back().get_any_name(), tensor));
    ASSERT_EQ(tensor.get_shape(), refShape2);
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(outputName));
    ASSERT_EQ(tensor.get_shape(), refOutShape2);
}

} // namespace BehaviorTestsDefinitions
} // namespace behavior
} // namespace test
} // namespace ov
@ -0,0 +1,188 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <chrono>
#include <gtest/gtest.h>
#include <initializer_list>
#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "base/ov_behavior_test_utils.hpp"
#include "openvino/core/attribute_visitor.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/rank.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/core/function.hpp"
#include "ngraph_functions/builders.hpp"
#include "openvino/runtime/infer_request.hpp"
#include "openvino/runtime/tensor.hpp"
#include "behavior/ov_infer_request/inference_chaining.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferenceChaining::getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj) {
    return OVInferRequestTests::getTestCaseName(obj);
}

std::shared_ptr<ov::Function> OVInferenceChaining::getFirstStaticFunction(const ov::PartialShape &shape) {
    auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape});
    params[0]->get_output_tensor(0).set_names({"input_tensor_0"});
    params[0]->set_friendly_name("param_0");
    params[1]->get_output_tensor(0).set_names({"input_tensor_1"});
    params[1]->set_friendly_name("param_1");
    params[2]->get_output_tensor(0).set_names({"input_tensor_2"});
    params[2]->set_friendly_name("param_2");
    auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD);
    auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD);
    eltwise2->get_output_tensor(0).set_names({"result_tensor_0"});
    eltwise2->set_friendly_name("result_0");

    return std::make_shared<ov::Function>(eltwise2, ov::ParameterVector(params));
}

std::shared_ptr<ov::Function> OVInferenceChaining::getSecondStaticFunction(const ov::PartialShape &shape) {
    auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape});
    params[0]->get_output_tensor(0).set_names({"input_tensor_0"});
    params[0]->set_friendly_name("param_0");
    params[1]->get_output_tensor(0).set_names({"input_tensor_1"});
    params[1]->set_friendly_name("param_1");
    auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::MULTIPLY);
    eltwise->get_output_tensor(0).set_names({"result_tensor_0"});
    eltwise->set_friendly_name("result_0");

    return std::make_shared<ov::Function>(eltwise, ov::ParameterVector(params));
}

std::shared_ptr<ov::Function> OVInferenceChaining::getThirdStaticFunction(const ov::PartialShape &shape) {
    auto params = ngraph::builder::makeDynamicParams(element::Type_t::f32, {shape, shape, shape, shape});
    params[0]->get_output_tensor(0).set_names({"input_tensor_0"});
    params[0]->set_friendly_name("param_0");
    params[1]->get_output_tensor(0).set_names({"input_tensor_1"});
    params[1]->set_friendly_name("param_1");
    params[2]->get_output_tensor(0).set_names({"input_tensor_2"});
    params[2]->set_friendly_name("param_2");
    params[3]->get_output_tensor(0).set_names({"input_tensor_3"});
    params[3]->set_friendly_name("param_3");
    auto eltwise = ngraph::builder::makeEltwise(params[0], params[1], ngraph::helpers::EltwiseTypes::ADD);
    auto eltwise2 = ngraph::builder::makeEltwise(eltwise, params[2], ngraph::helpers::EltwiseTypes::ADD);
    auto eltwise3 = ngraph::builder::makeEltwise(eltwise2, params[3], ngraph::helpers::EltwiseTypes::MULTIPLY);
    eltwise3->get_output_tensor(0).set_names({"result_tensor_0"});
    eltwise3->set_friendly_name("result_0");

    return std::make_shared<ov::Function>(eltwise3, ov::ParameterVector(params));
}

void OVInferenceChaining::Run() {
    ov::runtime::ExecutableNetwork execNet0, execNet1, execNet2;
    OV_ASSERT_NO_THROW(execNet0 = core->compile_model(function0, targetDevice, configuration));
    OV_ASSERT_NO_THROW(execNet1 = core->compile_model(function1, targetDevice, configuration));
    OV_ASSERT_NO_THROW(execNet2 = core->compile_model(function2, targetDevice, configuration));

    ov::runtime::InferRequest r0, r1, r2;
    OV_ASSERT_NO_THROW(r0 = execNet0.create_infer_request());
    OV_ASSERT_NO_THROW(r1 = execNet1.create_infer_request());
    OV_ASSERT_NO_THROW(r2 = execNet2.create_infer_request());

    // perform inference chaining
    if (outputToInput) {
        OV_ASSERT_NO_THROW(r1.set_tensor("input_tensor_0", r0.get_tensor("result_tensor_0")));
    } else {
        OV_ASSERT_NO_THROW(r0.set_tensor("result_tensor_0", r1.get_tensor("input_tensor_0")));
    }

    // create input tensors
    ov::runtime::Tensor t0 = tensor(std::vector<float>{1.0f, 2.0f, 3.0f});
    ov::runtime::Tensor t1 = tensor(std::vector<float>{4.0f, 5.0f, 6.0f});
    ov::runtime::Tensor t2 = tensor(std::vector<float>{7.0f, 8.0f, 9.0f});
    ov::runtime::Tensor t3 = tensor(std::vector<float>{2.0f, 3.0f, 2.0f});

    OV_ASSERT_NO_THROW(r0.set_tensor("input_tensor_0", t0));
    OV_ASSERT_NO_THROW(r0.set_tensor("input_tensor_1", t1));
    OV_ASSERT_NO_THROW(r0.set_tensor("input_tensor_2", t2));
    OV_ASSERT_NO_THROW(r1.set_tensor("input_tensor_1", t3));

    OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_0", t0));
    OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_1", t1));
    OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_2", t2));
    OV_ASSERT_NO_THROW(r2.set_tensor("input_tensor_3", t3));

    OV_ASSERT_NO_THROW(r0.infer());
    OV_ASSERT_NO_THROW(r1.infer());
    OV_ASSERT_NO_THROW(r2.infer());

    // check results
    std::vector<float> reference1 = {12.0f, 15.0f, 18.0f};
    std::vector<float> reference2 = {24.0f, 45.0f, 36.0f};
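    // For reference: the first network computes t0 + t1 + t2 = {1,2,3} +
    // {4,5,6} + {7,8,9} = {12, 15, 18} (reference1); chaining feeds that
    // result into the second network, which multiplies it by t3 = {2,3,2},
    // giving {24, 45, 36} (reference2); the third network computes the same
    // expression in a single graph, so it must also match reference2.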

    auto rti = r0.get_tensor("result_tensor_0");
    auto rt0 = r1.get_tensor("result_tensor_0");
    auto rt1 = r2.get_tensor("result_tensor_0");

    for (size_t i = 0; i < reference1.size(); ++i) {
        EXPECT_EQ(reference1[i], rti.data<float>()[i]);
        EXPECT_EQ(reference2[i], rt0.data<float>()[i]);
        EXPECT_EQ(reference2[i], rt1.data<float>()[i]);
    }
}

TEST_P(OVInferenceChaining, StaticOutputToStaticInput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    function0 = getFirstStaticFunction();
    function1 = getSecondStaticFunction();
    function2 = getThirdStaticFunction();

    Run();
}

TEST_P(OVInferenceChaining, StaticOutputToDynamicInput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    const auto dynamic = ov::PartialShape::dynamic(ov::Rank(1));
    function0 = getFirstStaticFunction();
    function1 = getSecondStaticFunction(dynamic);
    function2 = getThirdStaticFunction(dynamic);

    Run();
}

TEST_P(OVInferenceChaining, DynamicOutputToDynamicInput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    const auto dynamic = ov::PartialShape::dynamic();
    function0 = getFirstStaticFunction(dynamic);
    function1 = getSecondStaticFunction(dynamic);
    function2 = getThirdStaticFunction(dynamic);

    Run();
}

TEST_P(OVInferenceChaining, DynamicInputToDynamicOutput) {
    // Skip test according to plugin specific disabledTestPatterns() (if any)
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    this->outputToInput = false;

    const auto dynamic = ov::PartialShape::dynamic();
    function0 = getFirstStaticFunction(dynamic);
    function1 = getSecondStaticFunction(dynamic);
    function2 = getThirdStaticFunction(dynamic);

    Run();
}

} // namespace behavior
} // namespace test
} // namespace ov
@ -0,0 +1,231 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <thread>
#include <future>

#include "shared_test_classes/subgraph/basic_lstm.hpp"
#include "behavior/ov_infer_request/io_tensor.hpp"
#include "functional_test_utils/ov_tensor_utils.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferRequestIOTensorTest::getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj) {
    return OVInferRequestTests::getTestCaseName(obj);
}

void OVInferRequestIOTensorTest::SetUp() {
    OVInferRequestTests::SetUp();
    req = execNet.create_infer_request();
    input = execNet.input();
    output = execNet.output();
}

void OVInferRequestIOTensorTest::TearDown() {
    req = {};
    input = {};
    output = {};
    OVInferRequestTests::TearDown();
}

TEST_P(OVInferRequestIOTensorTest, CanCreateInferRequest) {
    runtime::InferRequest req;
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
}

TEST_P(OVInferRequestIOTensorTest, failToSetNullptrForInput) {
    ASSERT_THROW(req.set_tensor(input, {}), ov::Exception);
}

TEST_P(OVInferRequestIOTensorTest, failToSetNullptrForOutput) {
    OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
    ASSERT_THROW(req.set_tensor(output, {}), ov::Exception);
}

TEST_P(OVInferRequestIOTensorTest, getAfterSetInputDoNotChangeInput) {
    auto tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape());
    OV_ASSERT_NO_THROW(req.set_tensor(input, tensor));
    runtime::Tensor actual_tensor;
    OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(input));

    ASSERT_TRUE(actual_tensor);
    ASSERT_NE(nullptr, actual_tensor.data());
    ASSERT_EQ(tensor.data(), actual_tensor.data());
    ASSERT_EQ(input.get_element_type(), actual_tensor.get_element_type());
    ASSERT_EQ(input.get_shape(), actual_tensor.get_shape());
}

TEST_P(OVInferRequestIOTensorTest, getAfterSetInputDoNotChangeOutput) {
    auto tensor = create_and_fill_tensor(output.get_element_type(), output.get_shape());
    req.set_tensor(output, tensor);
    auto actual_tensor = req.get_tensor(output);

    ASSERT_TRUE(actual_tensor);
    ASSERT_FALSE(actual_tensor.data() == nullptr);
    ASSERT_EQ(actual_tensor.data(), tensor.data());
    ASSERT_EQ(output.get_element_type(), actual_tensor.get_element_type());
    ASSERT_EQ(output.get_shape(), actual_tensor.get_shape());
}

TEST_P(OVInferRequestIOTensorTest, failToSetTensorWithIncorrectName) {
    auto tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape());
    ASSERT_THROW(req.set_tensor("incorrect_input", tensor), ov::Exception);
}

TEST_P(OVInferRequestIOTensorTest, failToSetInputWithIncorrectSizes) {
    auto shape = input.get_shape();
    shape[0] *= 2;
    auto tensor = create_and_fill_tensor(input.get_element_type(), shape);
    ASSERT_THROW(req.set_tensor(input, tensor), ov::Exception);
}

TEST_P(OVInferRequestIOTensorTest, failToSetOutputWithIncorrectSizes) {
    auto shape = output.get_shape();
    shape[0] *= 2;
    auto tensor = create_and_fill_tensor(output.get_element_type(), shape);
    ASSERT_THROW(req.set_tensor(output, tensor), ov::Exception);
}

TEST_P(OVInferRequestIOTensorTest, canInferWithoutSetAndGetInOutSync) {
    OV_ASSERT_NO_THROW(req.infer());
}

TEST_P(OVInferRequestIOTensorTest, canInferWithoutSetAndGetInOutAsync) {
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
}

TEST_P(OVInferRequestIOTensorTest, secondCallGetInputDoNotReAllocateData) {
    runtime::Tensor tensor1, tensor2;
    OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(input));
    OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(input));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

TEST_P(OVInferRequestIOTensorTest, secondCallGetOutputDoNotReAllocateData) {
    runtime::Tensor tensor1, tensor2;
    OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(output));
    OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(output));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

TEST_P(OVInferRequestIOTensorTest, secondCallGetInputAfterInferSync) {
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    runtime::Tensor tensor1, tensor2;
    OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(input));
    OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(input));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

TEST_P(OVInferRequestIOTensorTest, secondCallGetOutputAfterInferSync) {
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    runtime::Tensor tensor1, tensor2;
    OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(output));
    OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(output));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

TEST_P(OVInferRequestIOTensorTest, canSetInputTensorForInferRequest) {
    auto input_tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape());
    OV_ASSERT_NO_THROW(req.set_tensor(input, input_tensor));
    runtime::Tensor actual_tensor;
    OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(input));
    ASSERT_EQ(input_tensor.data(), actual_tensor.data());
}

TEST_P(OVInferRequestIOTensorTest, canSetOutputBlobForInferRequest) {
    auto output_tensor = create_and_fill_tensor(output.get_element_type(), output.get_shape());
    OV_ASSERT_NO_THROW(req.set_tensor(output, output_tensor));
    runtime::Tensor actual_tensor;
    OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(output));
    ASSERT_EQ(output_tensor.data(), actual_tensor.data());
}

TEST_P(OVInferRequestIOTensorTest, canInferWithSetInOutBlobs) {
    auto input_tensor = create_and_fill_tensor(input.get_element_type(), input.get_shape());
    OV_ASSERT_NO_THROW(req.set_tensor(input, input_tensor));
    auto output_tensor = create_and_fill_tensor(output.get_element_type(), output.get_shape());
    OV_ASSERT_NO_THROW(req.set_tensor(output, output_tensor));
    OV_ASSERT_NO_THROW(req.infer());
}

TEST_P(OVInferRequestIOTensorTest, canInferWithGetIn) {
    runtime::Tensor input_tensor;
    OV_ASSERT_NO_THROW(input_tensor = req.get_tensor(input));
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(req.get_tensor(output));
}

TEST_P(OVInferRequestIOTensorTest, canInferWithGetOut) {
    runtime::Tensor output_tensor;
    OV_ASSERT_NO_THROW(output_tensor = req.get_tensor(output));
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(req.get_tensor(output));
}

std::string OVInferRequestIOTensorSetPrecisionTest::getTestCaseName(const testing::TestParamInfo<OVInferRequestSetPrecisionParams>& obj) {
    element::Type type;
    std::string targetDevice;
    std::map<std::string, std::string> configuration;
    std::tie(type, targetDevice, configuration) = obj.param;
    std::ostringstream result;
    result << "type=" << type << "_";
    result << "targetDevice=" << targetDevice << "_";
    if (!configuration.empty()) {
        using namespace CommonTestUtils;
        for (auto &configItem : configuration) {
            result << "configItem=" << configItem.first << "_" << configItem.second << "_";
        }
    }
    return result.str();
}

void OVInferRequestIOTensorSetPrecisionTest::SetUp() {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    std::tie(element_type, target_device, config) = this->GetParam();
    function = ngraph::builder::subgraph::makeConvPoolRelu();
    execNet = core->compile_model(function, target_device, config);
    req = execNet.create_infer_request();
}

void OVInferRequestIOTensorSetPrecisionTest::TearDown() {
    execNet = {};
    req = {};
}

TEST_P(OVInferRequestIOTensorSetPrecisionTest, CanSetOutBlobWithDifferentPrecision) {
    for (auto&& output : execNet.outputs()) {
        auto output_tensor = create_and_fill_tensor(element_type, output.get_shape());
        if (output.get_element_type() == element_type) {
            OV_ASSERT_NO_THROW(req.set_tensor(output, output_tensor));
        } else {
            ASSERT_THROW(req.set_tensor(output, output_tensor), ov::Exception);
        }
    }
}

TEST_P(OVInferRequestIOTensorSetPrecisionTest, CanSetInBlobWithDifferentPrecision) {
    for (auto&& input : execNet.inputs()) {
        auto input_tensor = create_and_fill_tensor(element_type, input.get_shape());
        if (input.get_element_type() == element_type) {
            OV_ASSERT_NO_THROW(req.set_tensor(input, input_tensor));
        } else {
            ASSERT_THROW(req.set_tensor(input, input_tensor), ov::Exception);
        }
    }
}

} // namespace behavior
} // namespace test
} // namespace ov
@ -0,0 +1,92 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <future>

#include "behavior/ov_infer_request/multithreading.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferRequestMultithreadingTests::getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj) {
    return OVInferRequestTests::getTestCaseName(obj);
}

TEST_P(OVInferRequestMultithreadingTests, canRun3SyncRequestsConsistentlyFromThreads) {
    runtime::InferRequest req1, req2, req3;
    OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req3 = execNet.create_infer_request());

    auto f1 = std::async(std::launch::async, [&] { req1.infer(); });
    auto f2 = std::async(std::launch::async, [&] { req2.infer(); });
    auto f3 = std::async(std::launch::async, [&] { req3.infer(); });

    f1.wait();
    f2.wait();
    f3.wait();

    OV_ASSERT_NO_THROW(f1.get());
    OV_ASSERT_NO_THROW(f2.get());
    OV_ASSERT_NO_THROW(f3.get());
}
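
// Note: std::future::get() rethrows any exception stored by the worker
// thread, so wrapping it in OV_ASSERT_NO_THROW checks that req.infer() did
// not throw inside the std::async task; wait() alone would not surface it.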

TEST_P(OVInferRequestMultithreadingTests, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) {
    runtime::InferRequest req1, req2, req3;
    OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req3 = execNet.create_infer_request());

    OV_ASSERT_NO_THROW(req1.infer());
    OV_ASSERT_NO_THROW(req2.infer());
    OV_ASSERT_NO_THROW(req3.infer());

    auto f1 = std::async(std::launch::async, [&] { req1.start_async(); });
    auto f2 = std::async(std::launch::async, [&] { req2.start_async(); });
    auto f3 = std::async(std::launch::async, [&] { req3.start_async(); });

    f1.wait();
    f2.wait();
    f3.wait();

    OV_ASSERT_NO_THROW(f1.get());
    OV_ASSERT_NO_THROW(f2.get());
    OV_ASSERT_NO_THROW(f3.get());
}

TEST_P(OVInferRequestMultithreadingTests, canRun3AsyncRequestsConsistentlyWithWait) {
    runtime::InferRequest req1, req2, req3;
    OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req3 = execNet.create_infer_request());

    req1.start_async();
    OV_ASSERT_NO_THROW(req1.wait());

    req2.start_async();
    OV_ASSERT_NO_THROW(req2.wait());

    req3.start_async();
    OV_ASSERT_NO_THROW(req3.wait());
}

TEST_P(OVInferRequestMultithreadingTests, canRun3AsyncRequestsParallelWithWait) {
    runtime::InferRequest req1, req2, req3;
    OV_ASSERT_NO_THROW(req1 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req2 = execNet.create_infer_request());
    OV_ASSERT_NO_THROW(req3 = execNet.create_infer_request());

    req1.start_async();
    req2.start_async();
    req3.start_async();

    OV_ASSERT_NO_THROW(req2.wait());
    OV_ASSERT_NO_THROW(req1.wait());
    OV_ASSERT_NO_THROW(req3.wait());
}

} // namespace behavior
} // namespace test
} // namespace ov
@ -0,0 +1,42 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "behavior/ov_infer_request/perf_counters.hpp"
#include "openvino/runtime/profiling_info.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferRequestPerfCountersTest::getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj) {
    return OVInferRequestTests::getTestCaseName(obj);
}

void OVInferRequestPerfCountersTest::SetUp() {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    std::tie(targetDevice, configuration) = this->GetParam();
    function = ngraph::builder::subgraph::makeConvPoolRelu();
    configuration.insert({ InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES });
    execNet = core->compile_model(function, targetDevice, configuration);
    req = execNet.create_infer_request();
}

TEST_P(OVInferRequestPerfCountersTest, NotEmptyAfterAsyncInfer) {
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    std::vector<runtime::ProfilingInfo> perf;
    OV_ASSERT_NO_THROW(perf = req.get_profiling_info());
    ASSERT_FALSE(perf.empty());
}

TEST_P(OVInferRequestPerfCountersTest, NotEmptyAfterSyncInfer) {
    OV_ASSERT_NO_THROW(req.infer());
    std::vector<runtime::ProfilingInfo> perf;
    OV_ASSERT_NO_THROW(perf = req.get_profiling_info());
    ASSERT_FALSE(perf.empty());
}
} // namespace behavior
} // namespace test
} // namespace ov
@ -0,0 +1,84 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "behavior/ov_infer_request/wait.hpp"
#include "openvino/runtime/exception.hpp"

namespace ov {
namespace test {
namespace behavior {

std::string OVInferRequestWaitTests::getTestCaseName(const testing::TestParamInfo<InferRequestParams>& obj) {
    return OVInferRequestTests::getTestCaseName(obj);
}

void OVInferRequestWaitTests::SetUp() {
    OVInferRequestTests::SetUp();
    req = execNet.create_infer_request();
    input = execNet.input();
    output = execNet.output();
}

void OVInferRequestWaitTests::TearDown() {
    req = {};
    input = {};
    output = {};
    OVInferRequestTests::TearDown();
}

TEST_P(OVInferRequestWaitTests, CorrectOneAsyncInferWithGetInOutWithInfWait) {
    runtime::Tensor tensor;
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(input));
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(output));
}

// The plugin should correctly complete an async infer request whose input and
// result blob maps are allocated inside the plugin
TEST_P(OVInferRequestWaitTests, canStartAsyncInferWithGetInOutWithStatusOnlyWait) {
    runtime::Tensor tensor;
    OV_ASSERT_NO_THROW(tensor = req.get_tensor(input));
    OV_ASSERT_NO_THROW(req.infer());
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(req.wait_for({}));
}
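
// wait_for({}) passes a default-constructed (zero) std::chrono::milliseconds
// timeout, i.e. a status-only poll that returns immediately instead of
// blocking until the request completes.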

TEST_P(OVInferRequestWaitTests, canWaitWithoutStartAsync) {
    OV_ASSERT_NO_THROW(req.wait());
    OV_ASSERT_NO_THROW(req.wait_for({}));
    OV_ASSERT_NO_THROW(req.wait_for(std::chrono::milliseconds{1}));
}

TEST_P(OVInferRequestWaitTests, throwExceptionOnSetTensorAfterAsyncInfer) {
    auto&& config = configuration;
    auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS));
    if (itConfig != config.end()) {
        if (itConfig->second != "CPU_THROUGHPUT_AUTO") {
            if (std::stoi(itConfig->second) == 0) {
                GTEST_SKIP() << "Not applicable with disabled streams";
            }
        }
    }
    auto output_tensor = req.get_tensor(input);
    OV_ASSERT_NO_THROW(req.wait_for({}));
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(try {
        req.set_tensor(input, output_tensor);
    } catch (const ov::runtime::Busy&) {});
    OV_ASSERT_NO_THROW(req.wait_for({}));
    OV_ASSERT_NO_THROW(req.wait());
}

TEST_P(OVInferRequestWaitTests, throwExceptionOnGetTensorAfterAsyncInfer) {
    OV_ASSERT_NO_THROW(req.start_async());
    OV_ASSERT_NO_THROW(try {
        req.get_tensor(input);
    } catch (const ov::runtime::Busy&) {});
    OV_ASSERT_NO_THROW(req.wait());
}

} // namespace behavior
} // namespace test
} // namespace ov
@ -1,6 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

@ -55,6 +60,23 @@ inline bool strDoesnotContain(const std::string & str, const std::string & substr) {
#define ASSERT_STRINGEQ(lhs, rhs) \
    compare_cpp_strings(lhs, rhs)

#define OV_ASSERT_NO_THROW(statement) \
    OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_)

#define OV_ASSERT_NO_THROW_(statement, fail)                                \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_                                           \
    if (::testing::internal::AlwaysTrue()) {                                \
        try {                                                               \
            GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement);      \
        } catch (const std::exception& e) {                                 \
            fail("Expected: " #statement " doesn't throw an exception.\n"   \
                 "  Actual: it throws.") << e.what();                       \
        } catch (...) {                                                     \
            fail("Expected: " #statement " doesn't throw an exception.\n"   \
                 "  Actual: it throws.");                                   \
        }                                                                   \
    }
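
// Illustrative note: unlike plain ASSERT_NO_THROW, this macro appends the
// caught exception's message to the failure report, e.g.
//   OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
// prints e.what() when create_infer_request() throws, which makes behavior
// test failures easier to diagnose.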

inline void compare_blob(InferenceEngine::Blob::Ptr lhs, InferenceEngine::Blob::Ptr rhs) {
    ASSERT_EQ(lhs.get(), rhs.get());
    // TODO: add blob specific comparison for general case
@ -10,6 +10,7 @@
#include <algorithm>
#include <cctype>
#include <chrono>
#include <random>

#ifdef _WIN32
#ifndef NOMINMAX
@ -7,6 +7,7 @@
#include <gtest/gtest.h>

#include <string>
#include "test_assertions.hpp"

namespace CommonTestUtils {

@ -0,0 +1,19 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <openvino/runtime/tensor.hpp>

namespace ov {
namespace test {
ov::runtime::Tensor create_and_fill_tensor(
    const ov::element::Type element_type,
    const ov::Shape& shape,
    const uint32_t range = 10,
    const int32_t start_from = 0,
    const int32_t resolution = 1,
    const int seed = 1);
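
// Example (illustrative): with the defaults above,
//   auto t = ov::test::create_and_fill_tensor(ov::element::f32, {1, 4, 20, 20});
// returns an f32 tensor of shape 1x4x20x20 filled with random values in [0, 10).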
} // namespace test
} // namespace ov
@ -0,0 +1,53 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "functional_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/data_utils.hpp"
#include "openvino/core/type/element_type_traits.hpp"
#include "ngraph/coordinate_transform.hpp"
#include <queue>

namespace ov {
namespace test {
ov::runtime::Tensor create_and_fill_tensor(
    const ov::element::Type element_type,
    const ov::Shape& shape,
    const uint32_t range,
    const int32_t start_from,
    const int32_t resolution,
    const int seed) {
    auto tensor = ov::runtime::Tensor{element_type, shape};
#define CASE(X) case X: ::CommonTestUtils::fill_data_random(   \
    tensor.data<element_type_traits<X>::value_type>(),         \
    shape_size(shape),                                         \
    range, start_from, resolution, seed); break;
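// For example, CASE(ov::element::Type_t::f32) expands to a case label that
// fills tensor.data<float>() (element_type_traits maps the enum value to its
// C++ type) with shape_size(shape) random values.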
    switch (element_type) {
        CASE(ov::element::Type_t::boolean)
        CASE(ov::element::Type_t::bf16)
        CASE(ov::element::Type_t::f16)
        CASE(ov::element::Type_t::f32)
        CASE(ov::element::Type_t::f64)
        CASE(ov::element::Type_t::i8)
        CASE(ov::element::Type_t::i16)
        CASE(ov::element::Type_t::i32)
        CASE(ov::element::Type_t::i64)
        CASE(ov::element::Type_t::u8)
        CASE(ov::element::Type_t::u16)
        CASE(ov::element::Type_t::u32)
        CASE(ov::element::Type_t::u64)
        case ov::element::Type_t::u1:
        case ov::element::Type_t::i4:
        case ov::element::Type_t::u4:
            ::CommonTestUtils::fill_data_random(
                static_cast<uint8_t*>(tensor.data()),
                tensor.get_byte_size(),
                range, start_from, resolution, seed); break;
        default: OPENVINO_UNREACHABLE("Unsupported element type: ", element_type);
    }
#undef CASE
    return tensor;
}
} // namespace test
} // namespace ov
@ -166,8 +166,7 @@ public:
     * @return true if this object can be dynamically cast to the type const T*. Otherwise, false
     */
    template <typename T>
    bool is() const noexcept {
        static_assert(std::is_base_of<Tensor, T>::value, "Could not check type that is not inherited from Tensor");
    typename std::enable_if<std::is_base_of<Tensor, T>::value, bool>::type is() const noexcept {
        try {
            T::type_check(*this);
        } catch (...) {
@ -183,8 +182,7 @@ public:
     * @return T object
     */
    template <typename T>
    const T as() const {
        static_assert(std::is_base_of<Tensor, T>::value, "Could not check type that is not inherited from Tensor");
    const typename std::enable_if<std::is_base_of<Tensor, T>::value, T>::type as() const {
        T::type_check(*this);
        return *static_cast<const T*>(this);
    }
@ -195,7 +193,7 @@ public:
     * @tparam T Type to cast to. Must represent a class derived from the Tensor
     * @return T object
     */
    template <typename T>
    template <typename T, typename = typename std::enable_if<std::is_base_of<Tensor, T>::value>::type>
    operator T() const {
        return as<T>();
    }
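
    // Illustrative usage (RemoteTensorType stands for any hypothetical Tensor
    // subclass that provides a static type_check):
    //   if (tensor.is<RemoteTensorType>()) {
    //       auto remote = tensor.as<RemoteTensorType>();
    //   }
    // Moving the constraint from static_assert into enable_if removes
    // is<T>()/as<T>() from the candidate set for non-Tensor types instead of
    // failing inside the function body.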