Add test for RESULT_NOT_READY returned from Wait() in async mode (#2793)

* Add shared test for RESULT_NOT_READY return from Wait() in async mode

* Instantiate test for RESULT_NOT_READY for GNA Plugin only

* Fix compile error

* Increase model size for the RESULT_NOT_READY test

* Reuse most of the test

* Apply review

  - Fix typo

* Make the test deterministic

* Use callback timestamp

* Apply review

* Use promise and future
Author: Krzysztof Bruniecki, 2020-11-23 14:18:17 +01:00 (committed by GitHub)
Parent: 1c7cfb7c7d
Commit: 9b85d67cce
4 changed files with 87 additions and 26 deletions
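
The last three commit-message bullets (deterministic test, callback timestamp, promise and future) describe one technique, which can be shown without any Inference Engine types. A minimal sketch, assuming only the C++ standard library; the std::thread worker below is a stand-in for the asynchronous infer request and is not part of the commit:

#include <cassert>
#include <chrono>
#include <future>
#include <thread>

int main() {
    std::promise<std::chrono::system_clock::time_point> callbackTimeStamp;
    auto callbackTimeStampFuture = callbackTimeStamp.get_future();

    // "StartAsync()": the completion callback records when the work finished.
    std::thread worker([&callbackTimeStamp] {
        std::this_thread::sleep_for(std::chrono::milliseconds(20));  // ~20 ms of "inference"
        callbackTimeStamp.set_value(std::chrono::system_clock::now());
    });

    // "Wait(STATUS_ONLY)": poll without blocking, then note when the poll returned.
    const bool ready = callbackTimeStampFuture.wait_for(std::chrono::seconds(0)) == std::future_status::ready;
    const auto afterWaitTimeStamp = std::chrono::system_clock::now();

    // Only when the callback provably fired AFTER the poll returned may the test
    // insist on "not ready"; any other interleaving is a benign race, not a failure.
    if (afterWaitTimeStamp < callbackTimeStampFuture.get()) {
        assert(!ready);
    }
    worker.join();
    return 0;
}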

File 1 of 4

@@ -17,4 +17,11 @@ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
                             ::testing::Values(std::map<std::string, std::string>({}))),
                         InferRequestTests::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTestsResultNotReady,
+                        ::testing::Combine(
+                            ::testing::ValuesIn(netPrecisions),
+                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                            ::testing::Values(std::map<std::string, std::string>({}))),
+                        InferRequestTests::getTestCaseName);
 } // namespace
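
For context, ::testing::Combine produces the cartesian product of its generators, so the new instantiation runs the RESULT_NOT_READY test once per network precision, always on DEVICE_GNA with an empty config map. A sketch of the parameter tuple this expands to (assuming the fixture follows the usual WithParamInterface pattern; the alias name is illustrative, not from the commit):

// Hypothetical illustration of the parameter shape consumed by the fixture:
// (precision, device name, plugin configuration)
using BehaviorBasicParams = std::tuple<InferenceEngine::Precision,             // from netPrecisions
                                       std::string,                            // CommonTestUtils::DEVICE_GNA
                                       std::map<std::string, std::string>>;    // empty config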

File 2 of 4

@@ -4,6 +4,8 @@
 #pragma once
+#include <chrono>
+#include <future>
 #include <tuple>
 #include <vector>
 #include <string>
@@ -23,6 +25,7 @@
 #include "functional_test_utils/plugin_cache.hpp"
 #include "functional_test_utils/blob_utils.hpp"
 #include "ngraph_functions/subgraph_builders.hpp"
+#include "subgraph_tests/basic_lstm.hpp"
 namespace BehaviorTestsDefinitions {
 using InferRequestTests = BehaviorTestsUtils::BehaviorTestsBasic;
@@ -623,4 +626,39 @@ TEST_P(InferRequestTests, returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer)
         std::cout << "Exception" << e.what() << std::endl;
     }
 }
-} // namespace BehaviorTestsDefinitions
+
+class InferRequestTestsResultNotReady : public InferRequestTests {
+};
+
+TEST_P(InferRequestTestsResultNotReady, ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    // return function which computes around 20ms on GNA SW
+    function = LayerTestsDefinitions::Basic_LSTM_S::GetNetwork(3000, 380);
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    InferenceEngine::ResponseDesc response;
+    InferenceEngine::StatusCode sts = InferenceEngine::StatusCode::OK;
+    std::promise<std::chrono::system_clock::time_point> callbackTimeStamp;
+    auto callbackTimeStampFuture = callbackTimeStamp.get_future();
+    // add a callback to the request and capture the timestamp
+    req.SetCompletionCallback([&]() {
+        callbackTimeStamp.set_value(std::chrono::system_clock::now());
+    });
+    req.StartAsync();
+    ASSERT_NO_THROW(sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY));
+    // get timestamp taken AFTER return from the Wait(STATUS_ONLY)
+    const auto afterWaitTimeStamp = std::chrono::system_clock::now();
+    // IF the callback timestamp is larger than the afterWaitTimeStamp
+    // then we should observe RESULT_NOT_READY
+    if (afterWaitTimeStamp < callbackTimeStampFuture.get()) {
+        ASSERT_TRUE(sts == InferenceEngine::StatusCode::RESULT_NOT_READY);
+    }
+    ASSERT_NO_THROW(req.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY));
+}
+} // namespace BehaviorTestsDefinitions
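
The test leans on the two WaitMode values of the legacy InferRequest API: STATUS_ONLY returns the current status immediately without blocking, while RESULT_READY blocks until inference completes. Paraphrasing the enum from the Inference Engine headers of this era:

// From InferenceEngine::IInferRequest (paraphrased, not part of this diff):
enum WaitMode : int64_t {
    RESULT_READY = -1,   // block until the result becomes available
    STATUS_ONLY  = 0     // return the current request status without blocking
};
// So the poll can legitimately return RESULT_NOT_READY while the request is
// still running, and the final Wait(RESULT_READY) guarantees completion
// before the test tears down.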

File 3 of 4

@@ -27,7 +27,11 @@ public:
     static std::string getTestCaseName(testing::TestParamInfo<basicLstmParams> obj);
     void Run() override;
+    static std::shared_ptr<ngraph::Function> GetNetwork(uint64_t thirdDimOut,
+                                                        uint64_t hiddenSize,
+                                                        const InferenceEngine::Precision& netPrecission = InferenceEngine::Precision::FP32,
+                                                        std::vector<float>* hidden_memory_init_out = nullptr,
+                                                        std::vector<float>* cell_memory_init_out = nullptr);
 protected:
     size_t hidden_size;
     std::vector<float> hidden_memory_init;
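
Both call sites added by this commit exercise the defaulted signature; the new behavior test passes only the dimensions, while SetUp() keeps its original outputs:

// Behavior test: only the dimensions are needed; precision and the two
// init-vector outputs fall back to the defaults above.
function = LayerTestsDefinitions::Basic_LSTM_S::GetNetwork(3000, 380);

// SetUp(): preserves the original behavior, also capturing the randomly
// generated initial hidden/cell state.
function = GetNetwork(49, hidden_size, netPrecision, &hidden_memory_init, &cell_memory_init);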

File 4 of 4

@@ -47,52 +47,64 @@ void Basic_LSTM_S::SetUp() {
     InferenceEngine::Precision netPrecision;
     std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto params = ngraph::builder::makeParams(ngPrc, { {1, 490} });
     hidden_size = 118;
-    const size_t batch_size = 1;
     outPrc = InferenceEngine::Precision::FP32;
-    //Reshape_1 [1,490] -> [1, 10, 49]
-    std::vector<uint64_t> outFormShapes1 = { batch_size, 10, 49 };
-    auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{3}, outFormShapes1);
+    function = GetNetwork(49, hidden_size, netPrecision, &hidden_memory_init, &cell_memory_init);
+}
+
+std::shared_ptr<ngraph::Function> Basic_LSTM_S::GetNetwork(uint64_t thirdDimOut,
+                                                           uint64_t hiddenSize,
+                                                           const InferenceEngine::Precision& netPrecission,
+                                                           std::vector<float>* hidden_memory_init_out,
+                                                           std::vector<float>* cell_memory_init_out) {
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecission);
+    auto params = ngraph::builder::makeParams(ngPrc, { {1, 10 * thirdDimOut} });
+    const size_t batch_size = 1;
+    //Reshape_1 [1,thirdDimOut*10] -> [1, 10, thirdDimOut]
+    std::vector<uint64_t> outFormShapes1 = { batch_size, 10, thirdDimOut };
+    auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1);
     auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], pattern1, false);
     auto reshape1_shape = reshape1->output(0).get_shape();
-    auto H_init = ngraph::builder::makeConstant<float>(ngPrc, { batch_size, hidden_size }, {}, true);
-    auto C_init = ngraph::builder::makeConstant<float>(ngPrc, { batch_size, hidden_size }, {}, true);
-    hidden_memory_init = std::static_pointer_cast<ngraph::opset1::Constant>(H_init)->cast_vector<float>();
-    cell_memory_init = std::static_pointer_cast<ngraph::opset1::Constant>(C_init)->cast_vector<float>();
-    auto H_t = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape{ batch_size, hidden_size });
-    auto C_t = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape{ batch_size, hidden_size });
+    auto H_init = ngraph::builder::makeConstant<float>(ngPrc, { batch_size, hiddenSize }, {}, true);
+    auto C_init = ngraph::builder::makeConstant<float>(ngPrc, { batch_size, hiddenSize }, {}, true);
+    if (hidden_memory_init_out != nullptr) {
+        *hidden_memory_init_out = std::static_pointer_cast<ngraph::opset1::Constant>(H_init)->cast_vector<float>();
+    }
+    if (cell_memory_init_out != nullptr) {
+        *cell_memory_init_out = std::static_pointer_cast<ngraph::opset1::Constant>(C_init)->cast_vector<float>();
+    }
+    auto H_t = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape{ batch_size, hiddenSize });
+    auto C_t = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape{ batch_size, hiddenSize });
     H_t->set_friendly_name("hidden_state_1");
     C_t->set_friendly_name("cell_state_1");
     //Body
     auto X = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape{ batch_size, 1, reshape1_shape[2] });
-    auto weightsNode = ngraph::builder::makeConstant<float>(ngPrc, { 4 * hidden_size, reshape1_shape[2] }, {}, true);
-    auto reccurrenceWeightsNode = ngraph::builder::makeConstant<float>(ngPrc, { 4 * hidden_size, hidden_size }, {}, true);
+    auto weightsNode = ngraph::builder::makeConstant<float>(ngPrc, { 4 * hiddenSize, reshape1_shape[2] }, {}, true);
+    auto reccurrenceWeightsNode = ngraph::builder::makeConstant<float>(ngPrc, { 4 * hiddenSize, hiddenSize }, {}, true);
     //lstm [1, 10], [1, 118], [1, 118] -> [1, 118], [1, 118]
     outFormShapes1 = { batch_size, reshape1_shape[2] };
-    auto constantX = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, ngraph::Shape{2}, outFormShapes1);
+    auto constantX = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, ngraph::Shape{ 2 }, outFormShapes1);
     auto lstm1 = std::make_shared<ngraph::opset4::LSTMCell>(std::make_shared<ngraph::opset1::Reshape>(X, constantX, false),
                                                             H_t, C_t,
-                                                            weightsNode, reccurrenceWeightsNode, hidden_size);
+                                                            weightsNode, reccurrenceWeightsNode, hiddenSize);
     auto H_o = lstm1->output(0);
     auto C_o = lstm1->output(1);
-    //TensorIterator [1, 10, 49] [1, 118], [1, 118] -> [1, 118]
+    //TensorIterator [1, 10, thirdDimOut] [1, 118], [1, 118] -> [1, 118]
     auto body = std::make_shared<ngraph::Function>(
         ngraph::OutputVector{ H_o, C_o }, ngraph::ParameterVector{ X, H_t, C_t });
     auto tensor_iterator = std::make_shared<ngraph::opset1::TensorIterator>();
     tensor_iterator->set_body(body);
-    //input tensor shape: [1, 10, 49] chunk shape: [1, 1, 49]
+    //input tensor shape: [1, 10, thirdDimOut] chunk shape: [1, 1, thirdDimOut]
     tensor_iterator->set_sliced_input(X, reshape1, 0, 1, 1, -1, 1);
     tensor_iterator->set_merged_input(H_t, H_init, H_o);
     tensor_iterator->set_merged_input(C_t, C_init, C_o);
@@ -100,10 +112,10 @@ void Basic_LSTM_S::SetUp() {
     auto out0 = tensor_iterator->get_iter_value(H_o, -1);
     const size_t output_size = 12;
-    auto fc1 = ngraph::builder::makeFullyConnected(out0, ngPrc, output_size, true, { hidden_size, output_size }, { 1 }, { 1 });
+    auto fc1 = ngraph::builder::makeFullyConnected(out0, ngPrc, output_size, true, { hiddenSize, output_size }, { 1 }, { 1 });
-    ngraph::ResultVector results {std::make_shared<ngraph::opset1::Result>(fc1)};
-    function = std::make_shared<ngraph::Function>(results, params, "Basic_LSTM_S");
+    ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(fc1) };
+    return std::make_shared<ngraph::Function>(results, params, "Basic_LSTM_S");
 }
 void Basic_LSTM_S::Run() {
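
A back-of-envelope check of the "Increase model size" bullet: the LSTM weight matrices built by GetNetwork scale with thirdDimOut and hiddenSize, so the behavior test's GetNetwork(3000, 380) carries roughly 65x more weight elements than SetUp()'s GetNetwork(49, 118), which is what stretches GNA execution to the ~20 ms the RESULT_NOT_READY window needs. A small sketch of the arithmetic (ignoring biases and the final fully connected layer):

#include <cstdint>

// Weight elements of one LSTMCell as constructed above:
// weightsNode is [4*hiddenSize x thirdDimOut], reccurrenceWeightsNode is [4*hiddenSize x hiddenSize].
constexpr uint64_t lstmWeights(uint64_t thirdDimOut, uint64_t hiddenSize) {
    return 4 * hiddenSize * thirdDimOut + 4 * hiddenSize * hiddenSize;
}
static_assert(lstmWeights(49, 118) == 78824, "topology built by SetUp()");
static_assert(lstmWeights(3000, 380) == 5137600, "topology used by the RESULT_NOT_READY test");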