Allow the framework to generate input data without infer request object (#4117)

Alexander Perepelkin 2021-03-02 10:40:02 +03:00 committed by GitHub
parent 7e57ec54b3
commit 6c522fe76b
38 changed files with 62 additions and 87 deletions
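Summary of the change: input generation is split out of LayerTestsCommon::Infer() into a new virtual GenerateInputs() that fills the test's `inputs` blob vector before any InferRequest exists; Infer() then creates the request and binds the stored blobs. Tests now call GenerateInputs() between LoadNetwork() and Infer(). A minimal sketch of the new flow, assuming a hypothetical MyLayerTest derived from LayerTestsUtils::LayerTestsCommon:

TEST_P(MyLayerTest, CompareWithRefs) {
    LoadNetwork();     // compile the network for the target device; no InferRequest yet
    GenerateInputs();  // new virtual hook: populate the `inputs` blob vector
    Infer();           // create the InferRequest, SetBlob() each stored input, run
    Validate();        // compare actual outputs against references
}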

View File

@@ -53,6 +53,7 @@ public:
         configuration.insert(exportConfiguration.begin(), exportConfiguration.end());
         LoadNetwork();
+        GenerateInputs();
         Infer();
         executableNetwork.Export("exported_model.blob");
         for (auto const &configItem : importConfiguration) {

View File

@@ -20,6 +20,7 @@ TEST_P(FakeQuantizeLayerTest, CompareWithRefs) {
     size_t nIterations = 1;
     for (; nIterations != 0; nIterations--) {
         UpdateSeed();
+        GenerateInputs();
         Infer();
         Validate();
     }

View File

@@ -20,6 +20,7 @@ TEST_P(StaticShapeLoopTest, CompareWithRefs) {
 TEST_P(StaticShapeLoopTest, CompareWithPredefinedRefs) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     LoadNetwork();
+    GenerateInputs();
     Infer();
     auto expectedOutputs = PredefinedRefs();  // use predefined refs instead of CalculateRefs function
     const auto& actualOutputs = GetOutputs();

View File

@@ -56,6 +56,7 @@ TEST_P(Basic_LSTM_S, CompareWithRefImpl_LowLatencyTransformation) {
         }
     }
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     // Run and compare
     Infer();
     const auto& actualOutputs = GetOutputs();

View File

@@ -10,6 +10,7 @@ namespace SubgraphTestsDefinitions {
 TEST_P(EltwiseAfterConvTest, CompareWithRefImpl) {
     LoadNetwork();
+    GenerateInputs();
     Infer();
     // Create another copy of function for validation since some data will be changed by GNA plugin
     SetUp();
@@ -18,6 +19,7 @@ TEST_P(EltwiseAfterConvTest, CompareWithRefImpl) {
 TEST_P(EltwiseBeforeConvTest, CompareWithRefImpl) {
     LoadNetwork();
+    GenerateInputs();
     Infer();
     // Create another copy of function for validation since some data will be changed by GNA plugin
     SetUp();

View File

@@ -10,6 +10,7 @@ namespace SubgraphTestsDefinitions {
 TEST_P(ScaleShiftAfterConvTest, CompareWithRefImpl) {
     LoadNetwork();
+    GenerateInputs();
     Infer();
     // Create another copy of function for validation since some data will be changed by GNA plugin
     SetUp();
@@ -18,6 +19,7 @@ TEST_P(ScaleShiftAfterConvTest, CompareWithRefImpl) {
 TEST_P(ScaleShiftBeforeConvTest, CompareWithRefImpl) {
     LoadNetwork();
+    GenerateInputs();
     Infer();
     // Create another copy of function for validation since some data will be changed by GNA plugin
     SetUp();

View File

@@ -38,6 +38,7 @@ void ImportNetworkTestBase::Run() {
     configuration.insert(exportConfiguration.begin(), exportConfiguration.end());
     LoadNetwork();
+    GenerateInputs();
     Infer();
     const auto& actualOutputs = GetOutputs();
@@ -52,6 +53,7 @@ void ImportNetworkTestBase::Run() {
     exportImportNetwork();
     const auto importedExecNetwork = executableNetwork;
+    GenerateInputs();
     Infer();
     ASSERT_EQ(importedExecNetwork.GetInputsInfo().size(), compiledExecNetwork.GetInputsInfo().size());

View File

@@ -95,6 +95,7 @@ void ProposalBehTest::SetUp() {
 void ProposalBehTest::Run() {
     LoadNetwork();
+    GenerateInputs();
     Infer();
 }

View File

@@ -66,6 +66,7 @@ void SetBlobOfKindTest::Run() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     LoadNetwork();
+    GenerateInputs();
     if (isBlobKindSupported(core, targetDevice, blobKind)) {
         Infer();

View File

@@ -43,6 +43,7 @@ TEST_P(MultipleAllocations, InferWorksCorrectAfterAllocations) {
         std::cout << "Infer(): " << j << std::flush;
+        GenerateInputs();
         Infer();
         Validate();
     }

View File

@@ -154,6 +154,7 @@ namespace ConfigurationTestsDefinitions {
 void DynamicBatchTest::Run() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
     LoadNetwork();
+    GenerateInputs();
     Infer();
     Validate();
 }

View File

@@ -189,6 +189,8 @@ protected:
     void LoadNetwork();
+    virtual void GenerateInputs();
     virtual void Infer();
     TargetDevice targetDevice;

View File

@@ -62,7 +62,7 @@ class DetectionOutputLayerTest : public testing::WithParamInterface<DetectionOutputParams>,
     static std::string getTestCaseName(testing::TestParamInfo<DetectionOutputParams> obj);
     ngraph::op::DetectionOutputAttrs attrs;
     std::vector<InferenceEngine::SizeVector> inShapes;
-    void Infer() override;
+    void GenerateInputs() override;
     void Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) override;
 protected:
     void SetUp() override;

View File

@@ -36,7 +36,7 @@
 protected:
     void SetUp() override;
-    void Infer() override;
+    void GenerateInputs() override;
 private:
     ngraph::helpers::SequenceTestsMode m_mode;

View File

@@ -32,7 +32,7 @@ class LSTMSequenceTest : public testing::WithParamInterface<LSTMSequenceParams>,
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<LSTMSequenceParams> &obj);
 protected:
-    void Infer() override;
+    void GenerateInputs() override;
     void SetUp() override;
 private:

View File

@@ -34,7 +34,7 @@ using NmsParams = std::tuple<InputShapeParams,
 class NmsLayerTest : public testing::WithParamInterface<NmsParams>, virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<NmsParams> obj);
-    void Infer() override;
+    void GenerateInputs() override;
     void Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs, const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) override;
 protected:

View File

@@ -32,7 +32,7 @@ class PSROIPoolingLayerTest : public testing::WithParamInterface<psroiParams>,
                               virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<psroiParams> obj);
-    void Infer() override;
+    void GenerateInputs() override;
 protected:
     void SetUp() override;

View File

@@ -34,7 +34,7 @@
 protected:
     void SetUp() override;
-    void Infer() override;
+    void GenerateInputs() override;
 private:
     ngraph::helpers::SequenceTestsMode m_mode;

View File

@@ -23,7 +23,6 @@ class ROIAlignLayerTest : public testing::WithParamInterface<roialignParams>,
                           virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<roialignParams> obj);
-    void Infer() override;
 protected:
     void SetUp() override;

View File

@@ -30,7 +30,7 @@ class ROIPoolingLayerTest : public testing::WithParamInterface<roiPoolingParamsTuple>,
                             virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<roiPoolingParamsTuple> obj);
-    void Infer() override;
+    void GenerateInputs() override;
 protected:
     void SetUp() override;

View File

@@ -178,6 +178,7 @@ void LayerTestsCommon::Run() {
     try {
         LoadNetwork();
+        GenerateInputs();
         Infer();
         Validate();
         reportStatus(PassRate::Statuses::PASSED);
@@ -336,20 +337,34 @@ void LayerTestsCommon::LoadNetwork() {
     executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration);
 }

-void LayerTestsCommon::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-    inputs.clear();
+void LayerTestsCommon::GenerateInputs() {
     const auto& inputsInfo = executableNetwork.GetInputsInfo();
-    for (const auto& param : function->get_parameters()) {
+    const auto& functionParams = function->get_parameters();
+    for (int i = 0; i < functionParams.size(); ++i) {
+        const auto& param = functionParams[i];
         const auto infoIt = inputsInfo.find(param->get_friendly_name());
         GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
         const auto& info = infoIt->second;
         auto blob = GenerateInput(*info);
-        inferRequest.SetBlob(info->name(), blob);
         inputs.push_back(blob);
     }
 }

+void LayerTestsCommon::Infer() {
+    inferRequest = executableNetwork.CreateInferRequest();
+    const auto& inputsInfo = executableNetwork.GetInputsInfo();
+    const auto& functionParams = function->get_parameters();
+    for (int i = 0; i < functionParams.size(); ++i) {
+        const auto& param = functionParams[i];
+        const auto infoIt = inputsInfo.find(param->get_friendly_name());
+        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
+        const auto& info = infoIt->second;
+        auto blob = inputs[i];
+        inferRequest.SetBlob(info->name(), blob);
+    }
     if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
         configuration.count(InferenceEngine::PluginConfigParams::YES)) {
         auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
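Derived tests that previously overrode Infer() only to pre-fill special blobs can now override GenerateInputs() instead and never touch the InferRequest; the base Infer() picks the stored blobs up from `inputs` by parameter index. A hedged sketch of such an override, modeled on the GRUSequenceTest change further down (the class name MySequenceTest is hypothetical):

void MySequenceTest::GenerateInputs() {
    for (const auto& input : executableNetwork.GetInputsInfo()) {
        const auto& info = input.second;
        auto blob = GenerateInput(*info);    // default random fill
        if (input.first == "seq_lengths") {  // special-case a single input
            blob = FuncTestUtils::createAndFillBlob(info->getTensorDesc(), m_max_seq_len, 0);
        }
        inputs.push_back(blob);              // Infer() will SetBlob() these later
    }
}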

View File

@@ -184,7 +184,7 @@ void ActivationParamLayerTest::generateActivationBlob(std::vector<float> constant
 void ActivationParamLayerTest::Infer() {
     inferRequest = executableNetwork.CreateInferRequest();
     inputs.clear();
     auto blobInput = inferRequest.GetBlob("Input");
     blobInput = FuncTestUtils::createAndFillBlobFloat(blobInput->getTensorDesc());

View File

@@ -64,10 +64,7 @@ std::string DetectionOutputLayerTest::getTestCaseName(testing::TestParamInfo<DetectionOutputParams> obj) {
     return result.str();
 }

-void DetectionOutputLayerTest::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-    inputs.clear();
+void DetectionOutputLayerTest::GenerateInputs() {
     size_t it = 0;
     for (const auto &input : cnnNetwork.getInputsInfo()) {
         const auto &info = input.second;
@@ -88,11 +85,9 @@ void DetectionOutputLayerTest::Infer() {
         blob = make_blob_with_precision(info->getTensorDesc());
         blob->allocate();
         CommonTestUtils::fill_data_random_float<InferenceEngine::Precision::FP32>(blob, range, 0, resolution);
-        inferRequest.SetBlob(info->name(), blob);
         inputs.push_back(blob);
         it++;
     }
-    inferRequest.Infer();
 }

 void DetectionOutputLayerTest::Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) {

View File

@@ -93,25 +93,14 @@ namespace LayerTestsDefinitions {
         }
     }

-    void GRUSequenceTest::Infer() {
-        inferRequest = executableNetwork.CreateInferRequest();
-        inputs.clear();
+    void GRUSequenceTest::GenerateInputs() {
         for (const auto &input : executableNetwork.GetInputsInfo()) {
             const auto &info = input.second;
             auto blob = GenerateInput(*info);
             if (input.first == "seq_lengths") {
                 blob = FuncTestUtils::createAndFillBlob(info->getTensorDesc(), m_max_seq_len, 0);
             }
-            inferRequest.SetBlob(info->name(), blob);
             inputs.push_back(blob);
         }
-        if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
-            configuration.count(InferenceEngine::PluginConfigParams::YES)) {
-            auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
-            inferRequest.SetBatch(batchSize);
-        }
-        inferRequest.Infer();
     }
 } // namespace LayerTestsDefinitions

View File

@@ -93,10 +93,7 @@ namespace LayerTestsDefinitions {
         }
     }

-    void LSTMSequenceTest::Infer() {
-        inferRequest = executableNetwork.CreateInferRequest();
-        inputs.clear();
+    void LSTMSequenceTest::GenerateInputs() {
         for (const auto &input : executableNetwork.GetInputsInfo()) {
             const auto &info = input.second;
             auto blob = GenerateInput(*info);
@@ -104,14 +101,7 @@ namespace LayerTestsDefinitions {
                 blob = FuncTestUtils::createAndFillBlob(info->getTensorDesc(), m_max_seq_len, 0);
             }
-            inferRequest.SetBlob(info->name(), blob);
             inputs.push_back(blob);
         }
-        if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
-            configuration.count(InferenceEngine::PluginConfigParams::YES)) {
-            auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
-            inferRequest.SetBatch(batchSize);
-        }
-        inferRequest.Infer();
     }
 } // namespace LayerTestsDefinitions

View File

@@ -37,10 +37,7 @@ std::string NmsLayerTest::getTestCaseName(testing::TestParamInfo<NmsParams> obj)
     return result.str();
 }

-void NmsLayerTest::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-    inputs.clear();
+void NmsLayerTest::GenerateInputs() {
     size_t it = 0;
     for (const auto &input : cnnNetwork.getInputsInfo()) {
         const auto &info = input.second;
@@ -53,11 +50,9 @@ void NmsLayerTest::Infer() {
         } else {
             blob = GenerateInput(*info);
         }
-        inferRequest.SetBlob(info->name(), blob);
         inputs.push_back(blob);
         it++;
     }
-    inferRequest.Infer();
 }

 void NmsLayerTest::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs, const std::vector<Blob::Ptr> &actualOutputs) {

View File

@@ -77,10 +77,7 @@ static void fillROITensor(float* buffer, int numROIs, int batchSize,
     }
 }

-void PSROIPoolingLayerTest::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-    inputs.clear();
+void PSROIPoolingLayerTest::GenerateInputs() {
     auto inputShape = cnnNetwork.getInputShapes().begin()->second;

     size_t it = 0;
@@ -97,11 +94,9 @@ void PSROIPoolingLayerTest::Infer() {
         } else {
             blob = GenerateInput(*info);
         }
-        inferRequest.SetBlob(info->name(), blob);
         inputs.push_back(blob);
         it++;
     }
-    inferRequest.Infer();
 }

 void PSROIPoolingLayerTest::SetUp() {

View File

@@ -91,10 +91,7 @@ namespace LayerTestsDefinitions {
         }
     }

-    void RNNSequenceTest::Infer() {
-        inferRequest = executableNetwork.CreateInferRequest();
-        inputs.clear();
+    void RNNSequenceTest::GenerateInputs() {
         for (const auto &input : executableNetwork.GetInputsInfo()) {
             const auto &info = input.second;
             auto blob = GenerateInput(*info);
@@ -102,14 +99,7 @@ namespace LayerTestsDefinitions {
                 blob = FuncTestUtils::createAndFillBlob(info->getTensorDesc(), m_max_seq_len, 0);
             }
-            inferRequest.SetBlob(info->name(), blob);
             inputs.push_back(blob);
         }
-        if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
-            configuration.count(InferenceEngine::PluginConfigParams::YES)) {
-            auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
-            inferRequest.SetBatch(batchSize);
-        }
-        inferRequest.Infer();
     }
 } // namespace LayerTestsDefinitions

View File

@@ -74,20 +74,6 @@ static void fillIdxTensor(std::vector<int> & idx, int batchSize) {
     }
 }

-void ROIAlignLayerTest::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-    inputs.clear();
-    auto inputShape = cnnNetwork.getInputShapes().begin()->second;
-    for (const auto &input : cnnNetwork.getInputsInfo()) {
-        const auto &info = input.second;
-        Blob::Ptr blob;
-        blob = GenerateInput(*info);
-        inferRequest.SetBlob(info->name(), blob);
-        inputs.push_back(blob);
-    }
-    inferRequest.Infer();
-}

 void ROIAlignLayerTest::SetUp() {
     std::vector<size_t> inputShape;
     std::vector<size_t> coordsShape;

View File

@@ -36,10 +36,7 @@ namespace LayerTestsDefinitions {
         return result.str();
     }

-    void ROIPoolingLayerTest::Infer() {
-        inferRequest = executableNetwork.CreateInferRequest();
-        inputs.clear();
+    void ROIPoolingLayerTest::GenerateInputs() {
         auto feat_map_shape = cnnNetwork.getInputShapes().begin()->second;

         const auto is_roi_max_mode = (pool_method == ngraph::helpers::ROIPoolingTypes::ROI_MAX);
@@ -60,11 +57,9 @@ namespace LayerTestsDefinitions {
             } else {
                 blob = GenerateInput(*info);
             }
-            inferRequest.SetBlob(info->name(), blob);
             inputs.push_back(blob);
             it++;
         }
-        inferRequest.Infer();
     }

     void ROIPoolingLayerTest::SetUp() {

View File

@@ -105,6 +105,7 @@ void Basic_LSTM_S::Run() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     LoadNetwork();
+    GenerateInputs();
     Infer();
     const auto& actualOutputs = GetOutputs();

View File

@@ -115,6 +115,7 @@ namespace SubgraphTestsDefinitions {
         }
     }
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     Infer();
     switchToNgraphFriendlyModel();
     Validate();

View File

@@ -63,6 +63,7 @@ namespace SubgraphTestsDefinitions {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     LoadNetwork();
+    GenerateInputs();
     Infer();
     switchToNgraphFriendlyModel();
     Validate();

View File

@@ -265,6 +265,7 @@ namespace SubgraphTestsDefinitions {
         }
     }
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     Infer();
     switchToNgraphFriendlyModel();
     Validate();
@@ -303,6 +304,7 @@ namespace SubgraphTestsDefinitions {
         }
     }
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     Infer();
     CreatePureTensorIteratorModel();

View File

@@ -128,6 +128,7 @@ void MemoryEltwiseReshapeConcatTest::Run() {
                                                        memory_init.data(), memory_init.size());
     states[0].SetState(state_values_blob);
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     Infer();
     initNgraphFriendlyModel();
     Validate();

View File

@@ -414,6 +414,7 @@ void MultipleLSTMCellTest::Run() {
         }
     }
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     Infer();
     switchToNgraphFriendlyModel();
     Validate();
@@ -464,6 +465,7 @@ void MultipleLSTMCellTest::RunLowLatency(bool regular_api) {
         }
     }
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     Infer();
     // Calculate ref values for Unrolled TI

View File

@@ -85,6 +85,7 @@ namespace SubgraphTestsDefinitions {
         }
     }
     IE_SUPPRESS_DEPRECATED_END
+    GenerateInputs();
     Infer();
     switchToNgraphFriendlyModel();
     Validate();

View File

@@ -48,6 +48,7 @@ void SoftsignTest::Run() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     LoadNetwork();
+    GenerateInputs();
     Infer();
     function = GenerateNgraphFriendlySoftSign();