Shared execution_graph_tests to API2.0 (#21718)

Vitaliy Urusovskij 2023-12-21 16:27:24 +04:00 committed by GitHub
parent 46ebee6be6
commit 1fd3399cdf
13 changed files with 190 additions and 230 deletions

View File

@@ -5,29 +5,25 @@
#include <common_test_utils/test_constants.hpp>
#include "execution_graph_tests/add_output.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/sigmoid.hpp"
#include "ov_models/builders.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/sigmoid.hpp"
#include "openvino/op/constant.hpp"
using namespace ngraph;
inline std::shared_ptr<ov::Model> getTargetNetwork() {
auto shape = ov::Shape{1, 200};
auto type = ov::element::f32;
inline InferenceEngine::CNNNetwork getTargetNetwork() {
ngraph::Shape shape = {1, 200};
ngraph::element::Type type = ngraph::element::f32;
auto input = std::make_shared<op::v0::Parameter>(type, shape);
auto mem_i = std::make_shared<op::v0::Constant>(type, shape, 0);
auto mem_r = std::make_shared<op::v3::ReadValue>(mem_i, "id");
auto input = std::make_shared<ov::op::v0::Parameter>(type, shape);
auto mem_i = std::make_shared<ov::op::v0::Constant>(type, shape, 0);
auto mem_r = std::make_shared<ov::op::v3::ReadValue>(mem_i, "id");
auto mul = std::make_shared<ov::op::v1::Multiply>(mem_r, input);
auto mem_w = std::make_shared<op::v3::Assign>(mul, "id");
auto sigm = std::make_shared<ngraph::op::Sigmoid>(mul);
mem_r->set_friendly_name("Memory");
auto mem_w = std::make_shared<ov::op::v3::Assign>(mul, "id");
auto sigm = std::make_shared<ov::op::v0::Sigmoid>(mul);
mem_r->output(0).set_names({"Memory"});
mem_w->add_control_dependency(mem_r);
sigm->add_control_dependency(mem_w);
auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigm}, ngraph::ParameterVector{input}, "addOutput");
return InferenceEngine::CNNNetwork{function};
sigm->output(0).set_names({"Sigmoid"});
return std::make_shared<ov::Model>(sigm->outputs(), ov::ParameterVector{input}, "addOutput");
}
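Note (not part of the diff): API 2.0 addresses tensors by tensor name rather than by layer friendly name, which is why the hunk above trades set_friendly_name("Memory") for output(0).set_names({...}). A minimal self-contained sketch of the same pattern; the helper name is hypothetical, while shapes and tensor names mirror the hunk:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/sigmoid.hpp"

// Sketch only: build an ov::Model whose result tensor carries an explicit name,
// so a test can later call model->add_output("Sigmoid") or match it via get_any_name().
std::shared_ptr<ov::Model> make_named_output_model() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 200});
    auto sigm = std::make_shared<ov::op::v0::Sigmoid>(input);
    sigm->output(0).set_names({"Sigmoid"});  // tensor name, not a friendly name
    return std::make_shared<ov::Model>(sigm->outputs(), ov::ParameterVector{input}, "addOutput");
}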
std::vector<addOutputsParams> testCases = {

View File

@@ -8,20 +8,19 @@
#include "common_test_utils/test_constants.hpp"
using namespace ExecutionGraphTests;
using namespace InferenceEngine;
namespace {
const std::vector<RuntimePrecisionSpecificParams> params = {
/* {Ngraph function builder, function input precision, expected runtime precisions} */
{makeEltwiseFunction, {Precision::FP32, Precision::FP32}, {{"Eltwise", Precision::FP32}}},
{makeEltwiseFunction, {Precision::U16, Precision::U16}, {{"Eltwise", Precision::I32}}},
{makeEltwiseFunction, {Precision::BF16, Precision::BF16}, {{"Eltwise", Precision::BF16}}},
{makeEltwiseFunction, {Precision::U8, Precision::U8}, {{"Eltwise", Precision::U8}}},
{makeEltwiseFunction, {Precision::I8, Precision::I8}, {{"Eltwise", Precision::I8}}},
{makeFakeQuantizeReluFunction, {Precision::FP32}, {{"Relu", Precision::FP32}}},
{makeFakeQuantizeReluFunction, {Precision::U8}, {{"Relu", Precision::U8}}},
{makeFakeQuantizeBinaryConvolutionFunction, {Precision::FP32}, {{"FakeQuantize", Precision::FP32}, {"BinaryConvolution", Precision::BIN}}},
{makeEltwiseFunction, {ov::element::f32, ov::element::f32}, {{"Eltwise", ov::element::f32}}},
{makeEltwiseFunction, {ov::element::u16, ov::element::u16}, {{"Eltwise", ov::element::i32}}},
{makeEltwiseFunction, {ov::element::bf16, ov::element::bf16}, {{"Eltwise", ov::element::bf16}}},
{makeEltwiseFunction, {ov::element::u8, ov::element::u8}, {{"Eltwise", ov::element::u8}}},
{makeEltwiseFunction, {ov::element::i8, ov::element::i8}, {{"Eltwise", ov::element::i8}}},
{makeFakeQuantizeReluFunction, {ov::element::f32}, {{"Relu", ov::element::f32}}},
{makeFakeQuantizeReluFunction, {ov::element::u8}, {{"Relu", ov::element::u8}}},
{makeFakeQuantizeBinaryConvolutionFunction, {ov::element::f32}, {{"FakeQuantize", ov::element::f32}, {"BinaryConvolution", ov::element::u1}}},
};
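Aside (not part of the diff): ov::element::Type values print and compare directly, which is what lets this table and the runtime-precision check at the end of the PR drop InferenceEngine::details::convertPrecision. A small illustration; the helper and the "u1" example are assumptions, not code from the PR:

#include <string>

#include "openvino/core/type/element_type.hpp"

// Illustration only: API 2.0 element types stringify without conversion helpers,
// e.g. ov::element::u1 (the counterpart of Precision::BIN) is expected to print as "u1".
inline bool precision_matches(const ov::element::Type& expected, const std::string& reported) {
    return expected.to_string() == reported;
}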
INSTANTIATE_TEST_SUITE_P(smoke_ExecGraph, ExecGraphRuntimePrecision,

View File

@@ -9,7 +9,7 @@
#include "functional_test_utils/plugin_cache.hpp"
#include "ov_models/builders.hpp"
InferenceEngine::CNNNetwork getTargetNetwork() {
std::shared_ptr<ngraph::Function> getTargetNetwork() {
ngraph::Shape shape = {1, 200};
ngraph::element::Type type = ngraph::element::f32;
@@ -19,13 +19,11 @@ InferenceEngine::CNNNetwork getTargetNetwork() {
auto mul = std::make_shared<ov::op::v1::Multiply>(mem_r, input);
auto mem_w = std::make_shared<ngraph::op::v3::Assign>(mul, "r_1-3");
auto sigm = std::make_shared<ov::op::v0::Sigmoid>(mul);
mem_r->set_friendly_name("Memory_1");
mem_r->output(0).set_names({"Memory_1"});
mem_w->add_control_dependency(mem_r);
sigm->add_control_dependency(mem_w);
auto function =
std::make_shared<ngraph::Function>(ngraph::NodeVector{sigm}, ngraph::ParameterVector{input}, "addOutput");
return InferenceEngine::CNNNetwork{function};
sigm->output(0).set_names({"Sigmoid"});
return std::make_shared<ngraph::Function>(ngraph::NodeVector{sigm}, ngraph::ParameterVector{input}, "addOutput");
}
std::vector<addOutputsParams> testCases = {

View File

@@ -6,10 +6,9 @@
#include "common_test_utils/test_common.hpp"
#include <ie_core.hpp>
typedef std::tuple<
InferenceEngine::CNNNetwork, // CNNNetwork to work with
std::shared_ptr<ov::Model>, // Model to work with
std::vector<std::string>, // Target layers to add as outputs
std::string> // Target device name
addOutputsParams;
@@ -17,10 +16,6 @@ typedef std::tuple<
class AddOutputsTest : public ov::test::TestsCommon,
public testing::WithParamInterface<addOutputsParams> {
protected:
InferenceEngine::CNNNetwork net;
std::vector<std::string> outputsToAdd;
std::string deviceName;
void SetUp() override;
public:
static std::string getTestCaseName(const testing::TestParamInfo<addOutputsParams> &obj);

View File

@@ -6,16 +6,14 @@
#include <string>
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "common_test_utils/test_common.hpp"
namespace ExecutionGraphTests {
class ExecGraphInputsFusingBinConv : public ov::test::TestsCommon, public testing::WithParamInterface<std::string> {
public:
static std::string getTestCaseName(testing::TestParamInfo<std::string> obj);
std::shared_ptr<ngraph::Function> fnPtr;
std::string targetDevice;
std::shared_ptr<ov::Model> ov_model;
protected:
void SetUp() override;

View File

@@ -4,24 +4,26 @@
#pragma once
#include <gtest/gtest.h>
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "ov_models/builders.hpp"
namespace ExecutionGraphTests {
std::shared_ptr<ngraph::Function> makeEltwiseFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions);
std::shared_ptr<ngraph::Function> makeFakeQuantizeReluFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions);
std::shared_ptr<ngraph::Function> makeFakeQuantizeBinaryConvolutionFunction(const std::vector<InferenceEngine::Precision> &inputPrecisions);
std::shared_ptr<ov::Model> makeEltwiseFunction(const std::vector<ov::element::Type>& inputPrecisions);
std::shared_ptr<ov::Model> makeFakeQuantizeReluFunction(const std::vector<ov::element::Type>& inputPrecisions);
std::shared_ptr<ov::Model> makeFakeQuantizeBinaryConvolutionFunction(const std::vector<ov::element::Type> &inputPrecisions);
struct RuntimePrecisionSpecificParams {
std::function<std::shared_ptr<ngraph::Function>(const std::vector<InferenceEngine::Precision>& inputPrecisions)> makeFunction;
std::vector<InferenceEngine::Precision> inputPrecisions;
std::map<std::string, InferenceEngine::Precision> expectedPrecisions;
std::function<std::shared_ptr<ov::Model>(const std::vector<ov::element::Type>& inputPrecisions)> makeFunction;
std::vector<ov::element::Type> inputPrecisions;
std::map<std::string, ov::element::Type> expectedPrecisions;
};
using ExecGraphRuntimePrecisionParams = std::tuple<
@@ -34,8 +36,8 @@ class ExecGraphRuntimePrecision : public testing::WithParamInterface<ExecGraphRu
public:
static std::string getTestCaseName(testing::TestParamInfo<ExecGraphRuntimePrecisionParams> obj);
std::string targetDevice;
std::shared_ptr<ngraph::Function> fnPtr;
std::map<std::string, InferenceEngine::Precision> expectedPrecisions;
std::shared_ptr<ov::Model> fnPtr;
std::map<std::string, ov::element::Type> expectedPrecisions;
protected:
void SetUp() override;

View File

@@ -4,39 +4,47 @@
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "execution_graph_tests/add_output.hpp"
std::string AddOutputsTest::getTestCaseName(const testing::TestParamInfo<addOutputsParams> &obj) {
std::ostringstream results;
InferenceEngine::CNNNetwork net;
std::shared_ptr<ov::Model> net;
std::vector<std::string> outputsToAdd;
std::string deviceName;
std::tie(net, outputsToAdd, deviceName) = obj.param;
results << "Outputs:" << ov::test::utils::vec2str<std::string>(outputsToAdd);
results << "Outputs=" << ov::test::utils::vec2str<std::string>(outputsToAdd);
results << "Dev=" << deviceName;
return results.str();
}
void AddOutputsTest::SetUp() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
std::tie(net, outputsToAdd, deviceName) = GetParam();
}
TEST_P(AddOutputsTest, smoke_CheckOutputExist) {
std::shared_ptr<ov::Model> net;
std::vector<std::string> outputsToAdd;
std::string deviceName;
std::tie(net, outputsToAdd, deviceName) = GetParam();
std::vector<std::string> expectedOutputs = outputsToAdd;
for (const auto &out : net.getOutputsInfo()) {
expectedOutputs.push_back(out.first);
for (const auto &out : net->outputs()) {
expectedOutputs.push_back(out.get_any_name());
}
for (const auto &out : outputsToAdd) {
net.addOutput(out);
}
auto ie = PluginCache::get().ie(deviceName);
auto executableNet = ie->LoadNetwork(net, deviceName);
auto outputs = executableNet.GetOutputsInfo();
for (const auto &out : expectedOutputs) {
ASSERT_TRUE(outputs.count(out)) << "Layer " << out << " expected to be in network outputs but it's not!";
for (const auto &out : outputsToAdd) {
net->add_output(out);
}
auto ie = ov::test::utils::PluginCache::get().core(deviceName);
auto executableNet = ie->compile_model(net, deviceName);
auto outputs = executableNet.outputs();
for (const auto &expected_out_name : expectedOutputs) {
auto res = std::find_if(outputs.begin(), outputs.end(), [&](const ov::Output<const ov::Node>& out){
return expected_out_name == out.get_any_name();
}) != outputs.end();
ASSERT_TRUE(res) << "Layer " << expected_out_name << " expected to be in network outputs but it's not!";
}
}
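Not part of the diff: a condensed sketch of the API 2.0 flow the rewritten test exercises, i.e. expose a named tensor as an extra output, compile, and look the name up among the compiled model's outputs. The helper name and the "CPU" device string are placeholders:

#include <algorithm>
#include <memory>
#include <string>

#include "openvino/core/model.hpp"
#include "openvino/runtime/core.hpp"

// Sketch only: "model" is any ov::Model containing a tensor with the given name.
bool output_present_after_add(const std::shared_ptr<ov::Model>& model, const std::string& name) {
    model->add_output(name);                          // replaces CNNNetwork::addOutput
    ov::Core core;
    auto compiled = core.compile_model(model, "CPU"); // placeholder device
    const auto& outs = compiled.outputs();
    return std::any_of(outs.begin(), outs.end(), [&](const ov::Output<const ov::Node>& out) {
        return out.get_any_name() == name;
    });
}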

View File

@@ -5,9 +5,11 @@
#include "execution_graph_tests/keep_assign.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset5.hpp>
#include <inference_engine.hpp>
#include "openvino/runtime/core.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/constant.hpp"
namespace ExecutionGraphTests {
@@ -26,37 +28,34 @@ void ExecGraphKeepAssignNode::SetUp() {
*/
TEST_P(ExecGraphKeepAssignNode, KeepAssignNode) {
auto device_name = this->GetParam();
ngraph::Shape shape = {3, 2};
ngraph::element::Type type = ngraph::element::f32;
ov::Shape shape = {3, 2};
ov::element::Type type = ov::element::f32;
using std::make_shared;
// Some simple graph with Memory(Assign) node // in read //
auto input = make_shared<ov::op::v0::Parameter>(type, shape); // | \ / //
auto mem_i = make_shared<ov::op::v0::Constant>(type, shape, 0); // | mul //
auto mem_r = make_shared<ov::op::v3::ReadValue>(mem_i, "id"); // | / \ //
auto mul = make_shared<ov::op::v1::Multiply>(mem_r, input); // sum assign //
auto mem_w = make_shared<ov::op::v3::Assign>(mul, "id"); // | //
auto sum = make_shared<ov::op::v1::Add>(mul, input); // out //
// Some simple graph with Memory(Assign) node // in read //
auto input = std::make_shared<ov::op::v0::Parameter>(type, shape); // | \ / //
auto mem_i = std::make_shared<ov::op::v0::Constant>(type, shape, 0); // | mul //
auto mem_r = std::make_shared<ov::op::v3::ReadValue>(mem_i, "id"); // | / \ //
auto mul = std::make_shared<ov::op::v1::Multiply>(mem_r, input); // sum assign //
auto mem_w = std::make_shared<ov::op::v3::Assign>(mul, "id"); // | //
auto sum = std::make_shared<ov::op::v1::Add>(mul, input); // out //
mem_w->add_control_dependency(mem_r);
sum->add_control_dependency(mem_w);
auto function = std::make_shared<ngraph::Function>(
ngraph::NodeVector {sum},
ngraph::ParameterVector {input},
"SimpleNet");
auto model = std::make_shared<ov::Model>(
ov::NodeVector {sum},
ov::ParameterVector {input},
"SimpleNet");
// Load into plugin and get exec graph
auto ie = InferenceEngine::Core();
auto net = InferenceEngine::CNNNetwork(function);
auto exec_net = ie.LoadNetwork(net, device_name);
auto exec_graph = exec_net.GetExecGraphInfo();
auto exec_ops = exec_graph.getFunction()->get_ops();
auto core = ov::Core();
auto compiled_model = core.compile_model(model, device_name);
auto runtime_model = compiled_model.get_runtime_model();
auto runtime_ops = runtime_model->get_ops();
// Check Memory(Assign) node existence
bool assign_node_found;
for (auto &node : exec_ops) {
for (auto &node : runtime_ops) {
auto var = node->get_rt_info()["layerType"];
auto s_val = var.as<std::string>();

View File

@@ -6,10 +6,9 @@
#include <gtest/gtest.h>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset5.hpp>
#include <inference_engine.hpp>
#include "openvino/core/model.hpp"
#include "openvino/op/non_max_suppression.hpp"
#include "openvino/runtime/core.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "common_test_utils/ov_test_utils.hpp"
@@ -36,51 +35,42 @@ std::string ExecGraphNmsTransformLastNode::getTestCaseName(
TEST_P(ExecGraphNmsTransformLastNode, CheckIfCanBeInfered) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
using namespace ngraph;
auto device_name = this->GetParam();
ngraph::Shape boxes_shape = {1, 2, 4};
ngraph::Shape scores_shape = {1, 1, 2};
ov::Shape boxes_shape = {1, 2, 4};
ov::Shape scores_shape = {1, 1, 2};
float in_boxes[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
float in_scores[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
auto boxes = std::make_shared<ov::op::v0::Parameter>(element::f32, boxes_shape);
auto scores = std::make_shared<ov::op::v0::Parameter>(element::f32, scores_shape);
auto max_output_boxes_per_class = opset5::Constant::create(element::i64, Shape{}, {10});
auto iou_threshold = opset5::Constant::create(element::f32, Shape{}, {0.75});
auto score_threshold = opset5::Constant::create(element::f32, Shape{}, {0.7});
auto nms = std::make_shared<opset5::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class,
iou_threshold, score_threshold,
opset5::NonMaxSuppression::BoxEncodingType::CORNER, true, element::i64);
ngraph::ResultVector results {
auto boxes = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, boxes_shape);
auto scores = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, scores_shape);
auto max_output_boxes_per_class = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10});
auto iou_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.75});
auto score_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.7});
auto nms = std::make_shared<ov::op::v5::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class,
iou_threshold, score_threshold,
ov::op::v5::NonMaxSuppression::BoxEncodingType::CORNER, true, ov::element::i64);
nms->output(0).set_names({"nms"});
ov::ResultVector results {
std::make_shared<ov::op::v0::Result>(nms->output(0)),
};
auto f = std::make_shared<Function>(results, ParameterVector{boxes, scores}, "NMS");
auto f = std::make_shared<ov::Model>(results, ov::ParameterVector{boxes, scores}, "NMS");
auto ie = InferenceEngine::Core();
auto net = InferenceEngine::CNNNetwork(f);
auto exec_net = ie.LoadNetwork(net, device_name);
auto infer_req = exec_net.CreateInferRequest();
auto core = ov::Core();
auto exec_net = core.compile_model(f, device_name);
auto infer_req = exec_net.create_infer_request();
ov::Tensor boxes_tensor(ov::element::f32, boxes_shape, in_boxes);
ov::Tensor scores_tensor(ov::element::f32, scores_shape, in_scores);
infer_req.set_tensor(boxes, boxes_tensor);
infer_req.set_tensor(scores, scores_tensor);
infer_req.infer();
InferenceEngine::TensorDesc tDesc1(InferenceEngine::Precision::FP32, boxes_shape,
InferenceEngine::Layout::CHW);
InferenceEngine::TensorDesc tDesc2(InferenceEngine::Precision::FP32, scores_shape,
InferenceEngine::Layout::CHW);
const auto& initial_outputs = f->outputs();
const auto& final_outputs = exec_net.outputs();
InferenceEngine::Blob::Ptr inBlob1 = InferenceEngine::make_shared_blob<float>(tDesc1, in_boxes);
infer_req.SetBlob(boxes->get_name(), inBlob1);
InferenceEngine::Blob::Ptr inBlob2 = InferenceEngine::make_shared_blob<float>(tDesc2, in_scores);
infer_req.SetBlob(scores->get_name(), inBlob2);
infer_req.Infer();
const auto& initial_outputs = net.getOutputsInfo();
const auto& final_outputs = exec_net.GetOutputsInfo();
auto compareOutputNames = [] (const std::pair<std::string, InferenceEngine::CDataPtr>& lhs,
const std::pair<std::string, InferenceEngine::CDataPtr>& rhs)
{ return lhs.first == rhs.first; };
auto compareOutputNames = [] (const ov::Output<ov::Node>& lhs,
const ov::Output<const ov::Node>& rhs)
{ return lhs.get_any_name() == rhs.get_any_name(); };
ASSERT_TRUE(std::equal(initial_outputs.begin(), initial_outputs.end(), final_outputs.begin(), compareOutputNames));
}
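Outside the diff: the Blob-to-Tensor change above boils down to wrapping caller-owned buffers in ov::Tensor (no copy) and binding them to the request by input port instead of by layer name. A hedged sketch; the helper name and "CPU" device are placeholders, and the shapes mirror the test:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/runtime/core.hpp"

// Sketch only: bind external float buffers to an infer request by port (API 2.0).
void run_once(const std::shared_ptr<ov::Model>& model, float* boxes_data, float* scores_data) {
    ov::Core core;
    auto compiled = core.compile_model(model, "CPU");
    auto request = compiled.create_infer_request();

    ov::Tensor boxes(ov::element::f32, ov::Shape{1, 2, 4}, boxes_data);   // wraps, does not copy
    ov::Tensor scores(ov::element::f32, ov::Shape{1, 1, 2}, scores_data);

    request.set_tensor(model->input(0), boxes);   // by port; no TensorDesc or blob names needed
    request.set_tensor(model->input(1), scores);
    request.infer();
}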

View File

@@ -6,8 +6,8 @@
#include <gtest/gtest.h>
#include <memory>
#include <openvino/runtime/core.hpp>
#include <openvino/opsets/opset9.hpp>
#include "openvino/runtime/core.hpp"
#include "openvino/opsets/opset9.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "common_test_utils/ov_test_utils.hpp"

View File

@@ -4,12 +4,7 @@
#include <vector>
#include <ie_core.hpp>
#include <exec_graph_info.hpp>
#include <ngraph/function.hpp>
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/node_builders/group_convolution.hpp"
@@ -26,24 +21,23 @@ std::string ExecGraphInputsFusingBinConv::getTestCaseName(testing::TestParamInfo
}
void ExecGraphInputsFusingBinConv::SetUp() {
const InferenceEngine::SizeVector inputShapes = { 1, 16, 30, 30}, binConvKernelSize = {2, 2}, convKernelSize = {3, 3};
const std::vector<size_t> inputShapes = { 1, 16, 30, 30}, binConvKernelSize = {2, 2}, convKernelSize = {3, 3};
const size_t numOutChannels = 16, numGroups = 16;
const std::vector<size_t > strides = {1, 1}, dilations = {1, 1};
const std::vector<size_t> strides = {1, 1}, dilations = {1, 1};
const std::vector<ptrdiff_t> padsBegin = {1, 1}, padsEnd = {0, 0};
const ov::op::PadType paddingType = ov::op::PadType::EXPLICIT;
const float padValue = 1.0;
targetDevice = this->GetParam();
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ov::Shape(inputShapes))};
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape(inputShapes))};
auto binConv = ov::test::utils::make_binary_convolution(params[0], binConvKernelSize, strides, padsBegin, padsEnd, dilations, paddingType, numOutChannels,
padValue);
auto conv = ov::test::utils::make_group_convolution(binConv, ngraph::element::f32, convKernelSize, strides, padsBegin, padsEnd, dilations, paddingType,
numOutChannels, numGroups);
auto conv = ov::test::utils::make_group_convolution(binConv, ov::element::f32, convKernelSize, strides, padsBegin, padsEnd, dilations, paddingType,
numOutChannels, numGroups);
auto biasNode = std::make_shared<ov::op::v0::Constant>(ngraph::element::f32, std::vector<size_t>{16, 1, 1});
auto biasNode = std::make_shared<ov::op::v0::Constant>(ov::element::f32, std::vector<size_t>{16, 1, 1});
auto add = std::make_shared<ov::op::v1::Add>(conv, biasNode);
ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(add)};
fnPtr = std::make_shared<ngraph::Function>(results, params, "BinConvFuseConv");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(add)};
ov_model = std::make_shared<ov::Model>(results, params, "BinConvFuseConv");
}
void ExecGraphInputsFusingBinConv::TearDown() {
@@ -52,15 +46,15 @@ void ExecGraphInputsFusingBinConv::TearDown() {
TEST_P(ExecGraphInputsFusingBinConv, CheckNumInputsInBinConvFusingWithConv) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
InferenceEngine::CNNNetwork cnnNet(fnPtr);
auto ie = PluginCache::get().ie();
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
auto targetDevice = this->GetParam();
InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
auto function = execGraphInfo.getFunction();
ASSERT_NE(function, nullptr);
auto core = ov::test::utils::PluginCache::get().core();
auto compiled_model = core->compile_model(ov_model, targetDevice);
for (const auto & op : function->get_ops()) {
auto runtime_model = compiled_model.get_runtime_model();
ASSERT_NE(runtime_model, nullptr);
for (const auto & op : runtime_model->get_ops()) {
const auto & rtInfo = op->get_rt_info();
auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
auto it = rtInfo.find(paramName);
@@ -76,7 +70,7 @@ TEST_P(ExecGraphInputsFusingBinConv, CheckNumInputsInBinConvFusingWithConv) {
}
}
fnPtr.reset();
ov_model.reset();
};
} // namespace ExecutionGraphTests
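A general note, outside this diff: GetExecGraphInfo().getFunction() collapses into CompiledModel::get_runtime_model(), and execution details remain available as rt_info strings on the runtime nodes. A hedged sketch of reading them; the helper name is hypothetical, and "layerType" is the same key the keep_assign test above reads:

#include <iostream>
#include <string>

#include "openvino/runtime/core.hpp"

// Sketch only: dump execution-graph layer types of a compiled model (API 2.0).
void dump_layer_types(const ov::CompiledModel& compiled) {
    auto runtime_model = compiled.get_runtime_model();
    for (const auto& node : runtime_model->get_ops()) {
        const auto& rt_info = node->get_rt_info();
        auto it = rt_info.find("layerType");
        if (it == rt_info.end())
            continue;
        ov::Any layer_type = it->second;   // copy, then read as a string
        std::cout << node->get_friendly_name() << " : " << layer_type.as<std::string>() << "\n";
    }
}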

View File

@@ -5,8 +5,11 @@
#include "execution_graph_tests/remove_parameter.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include "openvino/runtime/core.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/constant.hpp"
namespace ExecutionGraphTests {
@@ -24,13 +27,10 @@ TEST_P(ExecGraphRemoveParameterNode, RemoveParameterNode) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
auto device_name = this->GetParam();
ngraph::Shape shape = {3, 2};
ov::Shape shape = {3, 2};
float in_data_2[6] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
float in_data[6] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
ngraph::element::Type type = ngraph::element::f32;
using std::make_shared;
using namespace ngraph::op;
ov::element::Type type = ov::element::f32;
// Some simple graph with 2 Parameters
// in2 in1 //
@@ -40,45 +40,36 @@ TEST_P(ExecGraphRemoveParameterNode, RemoveParameterNode) {
// sum //
// | //
// out //
auto input = make_shared<Parameter>(type, shape);
auto input2 = make_shared<Parameter>(type, shape);
auto mul = make_shared<ov::op::v1::Multiply>(input2, input);
auto sum = make_shared<ov::op::v1::Add>(mul, input);
auto input = std::make_shared<ov::op::v0::Parameter>(type, shape);
auto input2 = std::make_shared<ov::op::v0::Parameter>(type, shape);
auto mul = std::make_shared<ov::op::v1::Multiply>(input2, input);
auto sum = std::make_shared<ov::op::v1::Add>(mul, input);
auto function = std::make_shared<ngraph::Function>(
ngraph::NodeVector{sum}, ngraph::ParameterVector{input2, input},
auto function = std::make_shared<ov::Model>(
ov::NodeVector{sum}, ov::ParameterVector{input2, input},
"SimpleNet");
// Load into plugin and get exec graph
auto ie = InferenceEngine::Core();
auto net = InferenceEngine::CNNNetwork(function);
auto exec_net = ie.LoadNetwork(net, device_name);
auto exec_graph = exec_net.GetExecGraphInfo();
auto infer_req = exec_net.CreateInferRequest();
InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::FP32, shape,
InferenceEngine::Layout::NC);
InferenceEngine::Blob::Ptr inBlob2 =
InferenceEngine::make_shared_blob<float>(tDesc, in_data_2);
infer_req.SetBlob(input2->get_name(), inBlob2);
auto core = ov::Core();
auto compiled_model = core.compile_model(function, device_name);
auto infer_req = compiled_model.create_infer_request();
InferenceEngine::Blob::Ptr inBlob =
InferenceEngine::make_shared_blob<float>(tDesc, in_data);
infer_req.SetBlob(input->get_name(), inBlob);
ov::Tensor tensor2 {ov::element::f32, shape, in_data_2};
infer_req.set_tensor(input2, tensor2);
ov::Tensor tensor {ov::element::f32, shape, in_data};
infer_req.set_tensor(input, tensor);
infer_req.Infer();
infer_req.infer();
auto outBlob = infer_req.GetBlob(sum->get_name());
InferenceEngine::MemoryBlob::CPtr output =
InferenceEngine::as<InferenceEngine::MemoryBlob>(outBlob);
auto outputHolder = output->rmap();
const auto ref_result = outputHolder.as<float *>();
auto out_tensor = infer_req.get_tensor(function->output(0));
auto ref_result = out_tensor.data<float>();
ASSERT_EQ(function->get_parameter_index(input2), 0);
ASSERT_EQ(function->get_parameter_index(input), 1);
// Replace input2 by constant
auto const_in =
make_shared<Constant>(type, shape, std::vector<float>(6, 1.0));
std::make_shared<ov::op::v0::Constant>(type, shape, std::vector<float>(6, 1.0));
mul->input(0).replace_source_output(const_in->output(0));
function->remove_parameter(input2);
@@ -86,21 +77,16 @@ TEST_P(ExecGraphRemoveParameterNode, RemoveParameterNode) {
ASSERT_EQ(function->get_parameter_index(input), 0);
// Load new function into plugin and get exec graph
auto new_net = InferenceEngine::CNNNetwork(function);
auto new_exec_net = ie.LoadNetwork(new_net, device_name);
auto new_exec_graph = new_exec_net.GetExecGraphInfo();
auto new_compiled_model = core.compile_model(function, device_name);
// infer new graph
auto new_infer_req = new_exec_net.CreateInferRequest();
new_infer_req.SetBlob(input->get_name(), inBlob);
auto new_infer_req = new_compiled_model.create_infer_request();
new_infer_req.set_tensor(input, tensor);
new_infer_req.Infer();
new_infer_req.infer();
auto new_outBlob = new_infer_req.GetBlob(sum->get_name());
InferenceEngine::MemoryBlob::CPtr new_output =
InferenceEngine::as<InferenceEngine::MemoryBlob>(new_outBlob);
auto new_outputHolder = new_output->rmap();
const auto result = new_outputHolder.as<float *>();
auto new_out_tensor = new_infer_req.get_tensor(function->output(0));
auto result = new_out_tensor.data<float>();
for (int i = 0; i < 6; i++) {
ASSERT_NEAR(result[i], ref_result[i], 1e-5);
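Not in the diff itself: the graph edit this test performs, replacing a Parameter with a Constant and then dropping the parameter, uses plain ov::Model editing calls. Roughly as below; the helper name, the fill value, and the final validate call are assumptions, while replace_source_output/remove_parameter mirror the test above:

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

// Sketch only: freeze one of a model's Parameters into a Constant, in place.
void freeze_parameter(const std::shared_ptr<ov::Model>& model, size_t param_index, float value) {
    auto param = model->get_parameters().at(param_index);
    auto constant = std::make_shared<ov::op::v0::Constant>(
        param->get_element_type(), param->get_shape(),
        std::vector<float>(ov::shape_size(param->get_shape()), value));

    // Re-point every consumer of the parameter's output to the new constant.
    for (auto consumer : param->output(0).get_target_inputs())
        consumer.replace_source_output(constant->output(0));

    model->remove_parameter(param);   // remaining parameter indices shift down, as the test asserts
    model->validate_nodes_and_infer_types();
}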

View File

@@ -9,14 +9,10 @@
#include <string>
#include <functional>
#include <ie_core.hpp>
#include <ngraph/function.hpp>
#include <exec_graph_info.hpp>
#include "exec_graph_info.hpp"
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "common_test_utils/node_builders/binary_convolution.hpp"
#include "common_test_utils/node_builders/eltwise.hpp"
@@ -25,56 +21,56 @@
namespace ExecutionGraphTests {
std::shared_ptr<ngraph::Function> makeEltwiseFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions) {
std::shared_ptr<ov::Model> makeEltwiseFunction(const std::vector<ov::element::Type>& inputPrecisions) {
IE_ASSERT(inputPrecisions.size() == 2);
ov::ParameterVector inputs{std::make_shared<ov::op::v0::Parameter>(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[0]),
ov::ParameterVector inputs{std::make_shared<ov::op::v0::Parameter>(inputPrecisions[0],
ov::Shape{1, 16, 5, 4}),
std::make_shared<ov::op::v0::Parameter>(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[1]),
std::make_shared<ov::op::v0::Parameter>(inputPrecisions[1],
ov::Shape{1, 16, 5, 4})};
auto eltwise = ov::test::utils::make_eltwise(inputs[0], inputs[1], ngraph::helpers::EltwiseTypes::ADD);
auto eltwise = ov::test::utils::make_eltwise(inputs[0], inputs[1], ov::test::utils::EltwiseTypes::ADD);
eltwise->set_friendly_name("Eltwise");
auto function = std::make_shared<ngraph::Function>(eltwise, inputs, "EltwiseWithTwoDynamicInputs");
auto function = std::make_shared<ov::Model>(eltwise, inputs, "EltwiseWithTwoDynamicInputs");
return function;
}
std::shared_ptr<ngraph::Function> makeFakeQuantizeReluFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions) {
std::shared_ptr<ov::Model> makeFakeQuantizeReluFunction(const std::vector<ov::element::Type>& inputPrecisions) {
IE_ASSERT(inputPrecisions.size() == 1);
ov::ParameterVector inputs{
std::make_shared<ov::op::v0::Parameter>(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[0]), ov::Shape{1, 16, 5, 4})};
auto inputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {0});
auto inputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {255});
auto outputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {0});
auto outputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {255});
std::make_shared<ov::op::v0::Parameter>(inputPrecisions[0], ov::Shape{1, 16, 5, 4})};
auto inputLowNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {0});
auto inputHighNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {255});
auto outputLowNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {0});
auto outputHighNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {255});
auto fakeQuantize = std::make_shared<ov::op::v0::FakeQuantize>(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 256);
fakeQuantize->set_friendly_name("FakeQuantize");
auto relu = std::make_shared<ov::op::v0::Relu>(fakeQuantize);
relu->set_friendly_name("Relu");
auto function = std::make_shared<ngraph::Function>(relu, inputs, "FakeQuantizeRelu");
auto function = std::make_shared<ov::Model>(relu, inputs, "FakeQuantizeRelu");
return function;
}
std::shared_ptr<ngraph::Function> makeFakeQuantizeBinaryConvolutionFunction(const std::vector<InferenceEngine::Precision> &inputPrecisions) {
std::shared_ptr<ov::Model> makeFakeQuantizeBinaryConvolutionFunction(const std::vector<ov::element::Type> &inputPrecisions) {
IE_ASSERT(inputPrecisions.size() == 1);
ov::ParameterVector inputs{
std::make_shared<ov::op::v0::Parameter>(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[0]), ov::Shape{1, 16, 5, 4})};
auto inputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {1});
auto inputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {1});
auto outputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {0});
auto outputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {1});
std::make_shared<ov::op::v0::Parameter>(inputPrecisions[0], ov::Shape{1, 16, 5, 4})};
auto inputLowNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {1});
auto inputHighNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {1});
auto outputLowNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {0});
auto outputHighNode = ngraph::builder::makeConstant<float>(ov::element::f32, {1, 1, 1, 1}, {1});
auto fakeQuantize = std::make_shared<ov::op::v0::FakeQuantize>(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 2);
fakeQuantize->set_friendly_name("FakeQuantize");
auto binConv = ov::test::utils::make_binary_convolution(fakeQuantize, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, 32, 0);
binConv->set_friendly_name("BinaryConvolution");
auto function = std::make_shared<ngraph::Function>(binConv, inputs, "FakeQuantizeBinaryConvolution");
auto function = std::make_shared<ov::Model>(binConv, inputs, "FakeQuantizeBinaryConvolution");
return function;
}
@@ -104,14 +100,13 @@ void ExecGraphRuntimePrecision::TearDown() {
TEST_P(ExecGraphRuntimePrecision, CheckRuntimePrecision) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
InferenceEngine::CNNNetwork cnnNet(fnPtr);
auto ie = PluginCache::get().ie();
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
const auto execGraph = execNet.GetExecGraphInfo().getFunction();
auto core = ov::test::utils::PluginCache::get().core();
auto execNet = core->compile_model(fnPtr, targetDevice);
const auto execGraph = execNet.get_runtime_model();
auto ops = execGraph->get_ops();
for (auto expectedPrc : expectedPrecisions) {
auto opIter = std::find_if(ops.begin(), ops.end(), [&expectedPrc](std::shared_ptr<ngraph::Node> op) {
auto opIter = std::find_if(ops.begin(), ops.end(), [&expectedPrc](std::shared_ptr<ov::Node> op) {
return op->get_friendly_name() == expectedPrc.first;
});
@@ -124,9 +119,9 @@ TEST_P(ExecGraphRuntimePrecision, CheckRuntimePrecision) {
if (rtIter == rtInfo.end())
FAIL() << "Runtime precision is not found for node: " << opIter->get()->get_friendly_name();
if (InferenceEngine::details::convertPrecision(expectedPrc.second).get_type_name() != rtIter->second.as<std::string>())
if (expectedPrc.second.to_string() != rtIter->second.as<std::string>())
FAIL() << "`" << expectedPrc.first << "' node runtime precision mismatch: actual = " <<
rtIter->second.as<std::string>() << ", expected = " << InferenceEngine::details::convertPrecision(expectedPrc.second).get_type_name();
rtIter->second.as<std::string>() << ", expected = " << expectedPrc.second;
}
fnPtr.reset();
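To close, and also outside the diff: the precision check above reduces to comparing ov::element::Type::to_string() against the runtime-precision string stored on each execution-graph node. A sketch under the assumption that the rt_info key is "runtimePrecision" (the key these shared tests appear to read); the helper name is hypothetical:

#include <string>

#include "openvino/runtime/core.hpp"

// Sketch only: verify that a named runtime node reports the expected element type.
bool runtime_precision_is(const ov::CompiledModel& compiled,
                          const std::string& node_name,
                          const ov::element::Type& expected) {
    for (const auto& node : compiled.get_runtime_model()->get_ops()) {
        if (node->get_friendly_name() != node_name)
            continue;
        ov::Any reported = node->get_rt_info().at("runtimePrecision");  // assumed key; throws if absent
        return reported.as<std::string>() == expected.to_string();
    }
    return false;   // node not found in the execution graph
}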