Add IR frontend tests (#13178)
* Add IR frontend tests
* Fix comments
* Add tests
* Fix comments 1
* Fix comments 2
* fix comments 3
* fix code style
* Fix comments 4
* Fix comments 5

Co-authored-by: Ilya Churaev <ilya.churaev@intel.com>
Commit 11579530b7 (parent 07aabf27bc)
@@ -350,6 +350,10 @@ jobs:
    displayName: 'OV Core UT'
    continueOnError: false

  - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-IRFrontend.xml
    displayName: 'IR Frontend Tests'
    continueOnError: false

  - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ONNXFrontend.xml
    displayName: 'ONNX Frontend Tests'
    continueOnError: false
@@ -181,6 +181,10 @@ jobs:
    continueOnError: false
    enabled: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-IRFrontend.xml
    displayName: 'IR Frontend Tests'
    continueOnError: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ONNXFrontend.xml
    displayName: 'ONNX Frontend Tests'
    continueOnError: false
@@ -237,6 +237,10 @@ jobs:
    displayName: 'OV Core UT'
    continueOnError: false

  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-IRFrontend.xml
    displayName: 'IR Frontend Tests'
    continueOnError: false

  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ONNXFrontend.xml
    displayName: 'ONNX Frontend Tests'
    continueOnError: false
@@ -3,3 +3,8 @@
#

add_subdirectory(src)

if(ENABLE_TESTS)
    add_subdirectory(tests)
endif()
src/frontends/ir/tests/CMakeLists.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

set(TARGET_NAME ov_ir_frontend_tests)

ov_add_test_target(
        NAME ${TARGET_NAME}
        ROOT ${CMAKE_CURRENT_SOURCE_DIR}
        DEPENDENCIES
            openvino_ir_frontend
        LINK_LIBRARIES
            gtest
            gtest_main
            openvino::runtime::dev
            commonTestUtils
        INCLUDES
            "${CMAKE_CURRENT_SOURCE_DIR}/../include"
        ADD_CLANG_FORMAT
        LABELS
            IR_FRONTEND
)
src/frontends/ir/tests/frontend_test.hpp (new file, 63 lines)
@@ -0,0 +1,63 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <fstream>

#include "common_test_utils/graph_comparator.hpp"
#include "openvino/frontend/manager.hpp"
#include "openvino/openvino.hpp"

class IRFrontendTestsImpl {
protected:
    ov::Core core;
    ov::frontend::FrontEndManager manager;

    std::string xmlFileName = "IrFrontendTestModel.xml";
    std::string binFileName = "IrFrontendTestModel.bin";

    void createTemporalModelFile(std::string xmlFileContent,
                                 std::vector<unsigned char> binFileContent = std::vector<unsigned char>()) {
        ASSERT_TRUE(xmlFileContent.size() > 0);

        {
            std::ofstream xmlFile;
            xmlFile.open(xmlFileName);
            xmlFile << xmlFileContent;
            xmlFile.close();
        }

        if (binFileContent.size() > 0) {
            std::ofstream binFile;
            binFile.open(binFileName, std::ios::binary);
            binFile.write((const char*)binFileContent.data(), binFileContent.size());
            binFile.close();
        }
    }

    void RemoveTemporalFiles() {
        std::remove(xmlFileName.c_str());
        std::remove(binFileName.c_str());
    }

    std::shared_ptr<ov::Model> getWithIRFrontend(const std::string& model) {
        std::istringstream modelStringStream(model);
        std::istream& modelStream = modelStringStream;

        ov::frontend::FrontEnd::Ptr FE;
        ov::frontend::InputModel::Ptr inputModel;

        ov::AnyVector params{&modelStream};

        FE = manager.load_by_model(params);
        if (FE)
            inputModel = FE->load(params);

        if (inputModel)
            return FE->convert(inputModel);

        return nullptr;
    }
};
src/frontends/ir/tests/frontend_test_basic.cpp (new file, 959 lines)
@@ -0,0 +1,959 @@
|
||||
// Copyright (C) 2018-2022 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "frontend_test.hpp"
|
||||
#include "openvino/opsets/opset1.hpp"
|
||||
#include "openvino/opsets/opset3.hpp"
|
||||
|
||||
class IRFrontendTests : public ::testing::Test, public IRFrontendTestsImpl {
|
||||
protected:
|
||||
void SetUp() override {}
|
||||
|
||||
void TearDown() override {
|
||||
RemoveTemporalFiles();
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(IRFrontendTests, elementary_model_reading_v11) {
|
||||
std::string testModelV11 = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
ov::RTMap rtInfo;
|
||||
uint64_t version;
|
||||
|
||||
ASSERT_NO_THROW(model = getWithIRFrontend(testModelV11));
|
||||
ASSERT_TRUE(!!model);
|
||||
ASSERT_NO_THROW(rtInfo = model->get_rt_info());
|
||||
ASSERT_NO_THROW(version = rtInfo["version"].as<int64_t>());
|
||||
ASSERT_EQ(11, version);
|
||||
|
||||
std::shared_ptr<ov::Model> modelRef;
|
||||
{
|
||||
auto parameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 3, 22, 22});
|
||||
parameter->set_friendly_name("input");
|
||||
auto result = std::make_shared<ov::opset1::Result>(parameter);
|
||||
result->set_friendly_name("output");
|
||||
modelRef = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{parameter});
|
||||
}
|
||||
|
||||
const auto fc = FunctionsComparator::with_default()
|
||||
.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::CONST_VALUES);
|
||||
const auto res = fc.compare(model, modelRef);
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, elementary_model_reading_v10) {
|
||||
std::string testModelV10 = R"V0G0N(
|
||||
<net name="Network" version="10">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> modelv10;
|
||||
ov::RTMap rtInfoV10;
|
||||
uint64_t version;
|
||||
|
||||
ASSERT_NO_THROW(modelv10 = getWithIRFrontend(testModelV10));
|
||||
ASSERT_TRUE(!!modelv10);
|
||||
ASSERT_NO_THROW(rtInfoV10 = modelv10->get_rt_info());
|
||||
ASSERT_NO_THROW(version = rtInfoV10["version"].as<int64_t>());
|
||||
ASSERT_EQ(10, version);
|
||||
|
||||
std::shared_ptr<ov::Model> modelRef;
|
||||
{
|
||||
auto parameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 3, 22, 22});
|
||||
parameter->set_friendly_name("input");
|
||||
auto result = std::make_shared<ov::opset1::Result>(parameter);
|
||||
result->set_friendly_name("output");
|
||||
modelRef = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{parameter});
|
||||
}
|
||||
|
||||
const auto fc = FunctionsComparator::with_default()
|
||||
.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::CONST_VALUES);
|
||||
const auto res = fc.compare(modelv10, modelRef);
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, elementary_model_reading_v9) {
|
||||
std::string testModelV9 = R"V0G0N(
|
||||
<net name="Network" version="9">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> modelv9;
|
||||
ASSERT_THROW(modelv9 = core.read_model(testModelV9, ov::Tensor()), ov::Exception);
|
||||
ASSERT_FALSE(!!modelv9);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, model_with_missing_weights) {
|
||||
std::string testModelV11 = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="value1" type="Const" version="opset1">
|
||||
<data element_type="i64" shape="4" offset="0" size="32" />
|
||||
<output>
|
||||
<port id="0" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="Transpose0321" type="Transpose" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
<port id="1" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="3" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
ASSERT_THROW(core.read_model(testModelV11, ov::Tensor()), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, model_with_weights_reading_from_disk) {
|
||||
std::string xmlModel = R"V0G0N(
|
||||
<?xml version="1.0" ?>
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="value1" type="Const" version="opset1">
|
||||
<data element_type="i64" shape="4" offset="0" size="32" />
|
||||
<output>
|
||||
<port id="0" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="Transpose0321" type="Transpose" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
<port id="1" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="3" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::vector<unsigned char> buffer(32, 0);
|
||||
uint64_t* uint64Buffer = reinterpret_cast<uint64_t*>(buffer.data());
|
||||
uint64Buffer[0] = 0;
|
||||
uint64Buffer[1] = 3;
|
||||
uint64Buffer[2] = 2;
|
||||
uint64Buffer[3] = 1;
|
||||
|
||||
createTemporalModelFile(xmlModel, buffer);
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName));
|
||||
ASSERT_TRUE(!!model);
|
||||
|
||||
std::shared_ptr<ov::Model> modelRef;
|
||||
{
|
||||
auto parameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 3, 22, 22});
|
||||
parameter->set_friendly_name("input");
|
||||
auto constant =
|
||||
std::make_shared<ov::opset1::Constant>(ov::element::i64, ov::Shape{4}, std::vector<uint64_t>{0, 3, 2, 1});
|
||||
constant->set_friendly_name("value1");
|
||||
auto transpose = std::make_shared<ov::opset1::Transpose>(parameter, constant);
|
||||
transpose->set_friendly_name("Transpose0321");
|
||||
auto result = std::make_shared<ov::opset1::Result>(transpose);
|
||||
result->set_friendly_name("output");
|
||||
modelRef = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{parameter});
|
||||
}
|
||||
|
||||
const auto fc = FunctionsComparator::with_default()
|
||||
.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::CONST_VALUES);
|
||||
const auto res = fc.compare(model, modelRef);
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, model_without_weights_reading_from_disk) {
|
||||
std::string xmlModel = R"V0G0N(
|
||||
<?xml version="1.0" ?>
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
createTemporalModelFile(xmlModel);
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_NO_THROW(model = core.read_model(xmlFileName));
|
||||
ASSERT_TRUE(!!model);
|
||||
|
||||
std::shared_ptr<ov::Model> modelRef;
|
||||
{
|
||||
auto parameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 3, 22, 22});
|
||||
parameter->set_friendly_name("input");
|
||||
auto result = std::make_shared<ov::opset1::Result>(parameter);
|
||||
result->set_friendly_name("output");
|
||||
modelRef = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{parameter});
|
||||
}
|
||||
|
||||
const auto fc = FunctionsComparator::with_default()
|
||||
.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::CONST_VALUES);
|
||||
const auto res = fc.compare(model, modelRef);
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, model_with_wrong_shape) {
|
||||
std::string xmlModel = R"V0G0N(
|
||||
<?xml version="1.0" ?>
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="value1" type="Const" version="opset1">
|
||||
<data element_type="i64" shape="4" offset="0" size="16" />
|
||||
<output>
|
||||
<port id="0" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="Transpose0321" type="Transpose" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
<port id="1" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="3" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::vector<unsigned char> buffer(32, 0);
|
||||
uint64_t* uint64Buffer = reinterpret_cast<uint64_t*>(buffer.data());
|
||||
uint64Buffer[0] = 0;
|
||||
uint64Buffer[1] = 3;
|
||||
uint64Buffer[2] = 2;
|
||||
uint64Buffer[3] = 1;
|
||||
|
||||
createTemporalModelFile(xmlModel, buffer);
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(core.read_model(xmlFileName, binFileName), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, model_with_underallocated_weights_reading_from_disk) {
|
||||
std::string xmlModel = R"V0G0N(
|
||||
<?xml version="1.0" ?>
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="value1" type="Const" version="opset1">
|
||||
<data element_type="i64" shape="4" offset="0" size="32" />
|
||||
<output>
|
||||
<port id="0" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="Transpose0321" type="Transpose" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
<port id="1" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="3" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::vector<unsigned char> buffer(24, 0);
|
||||
uint64_t* uint64Buffer = reinterpret_cast<uint64_t*>(buffer.data());
|
||||
uint64Buffer[0] = 0;
|
||||
uint64Buffer[1] = 3;
|
||||
uint64Buffer[2] = 2;
|
||||
|
||||
createTemporalModelFile(xmlModel, buffer);
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(core.read_model(xmlFileName, binFileName), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, model_with_missing_weights_from_disk) {
|
||||
std::string xmlModel = R"V0G0N(
|
||||
<?xml version="1.0" ?>
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="value1" type="Const" version="opset1">
|
||||
<data element_type="i64" shape="4" offset="0" size="32" />
|
||||
<output>
|
||||
<port id="0" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="Transpose0321" type="Transpose" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
<port id="1" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="3" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
createTemporalModelFile(xmlModel);
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(core.read_model(xmlFileName, binFileName), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, missing_layer_data) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
ASSERT_THROW(core.read_model(model, ov::Tensor()), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, model_with_wrong_dimensions) {
|
||||
std::string testModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>-2</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(model = core.read_model(testModel, ov::Tensor()), ov::Exception);
|
||||
ASSERT_TRUE(!model);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, name_is_not_unique) {
|
||||
std::string xmlModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="testname" type="Const" version="opset1">
|
||||
<data element_type="i64" shape="4" offset="0" size="32" />
|
||||
<output>
|
||||
<port id="0" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="testname" type="Transpose" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
<port id="1" precision="I64">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="3" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::vector<unsigned char> buffer(32, 0);
|
||||
int64_t* int64Buffer = reinterpret_cast<int64_t*>(buffer.data());
|
||||
int64Buffer[0] = 0;
|
||||
int64Buffer[1] = 3;
|
||||
int64Buffer[2] = 2;
|
||||
int64Buffer[3] = 1;
|
||||
|
||||
createTemporalModelFile(xmlModel, buffer);
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(core.read_model(xmlFileName, binFileName), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, edge_has_wrong_port_id) {
|
||||
std::string testModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="10"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(model = core.read_model(testModel, ov::Tensor()), ov::Exception);
|
||||
ASSERT_FALSE(!!model);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, edge_has_wrong_layer_id) {
|
||||
std::string testModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(model = core.read_model(testModel, ov::Tensor()), ov::Exception);
|
||||
ASSERT_FALSE(!!model);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, not_opset1) {
|
||||
std::string testModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="shapeof" type="ShapeOf" version="opset3">
|
||||
<data output_type="i32" />
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="I32">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="2" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="I32">
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
<edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_NO_THROW(model = getWithIRFrontend(testModel));
|
||||
ASSERT_TRUE(!!model);
|
||||
|
||||
std::shared_ptr<ov::Model> modelRef;
|
||||
{
|
||||
auto parameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 3, 22, 22});
|
||||
parameter->set_friendly_name("input");
|
||||
auto shapeof = std::make_shared<ov::opset3::ShapeOf>(parameter, ov::element::i32);
|
||||
shapeof->set_friendly_name("shapeof");
|
||||
auto result = std::make_shared<ov::opset1::Result>(shapeof);
|
||||
result->set_friendly_name("output");
|
||||
modelRef = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{parameter});
|
||||
}
|
||||
|
||||
const auto fc = FunctionsComparator::with_default()
|
||||
.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::CONST_VALUES);
|
||||
const auto res = fc.compare(model, modelRef);
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendTests, wrong_opset) {
|
||||
std::string testModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="wrongOpset">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="1" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(model = core.read_model(testModel, ov::Tensor()), ov::Exception);
|
||||
ASSERT_FALSE(!!model);
|
||||
}
|
src/frontends/ir/tests/frontend_test_with_extensions.cpp (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
// Copyright (C) 2018-2022 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "frontend_test.hpp"
|
||||
#include "openvino/op/util/framework_node.hpp"
|
||||
#include "openvino/opsets/opset1.hpp"
|
||||
|
||||
class IRFrontendExtensionTests : public ::testing::Test, public IRFrontendTestsImpl {
|
||||
protected:
|
||||
void SetUp() override {}
|
||||
|
||||
void TearDown() override {
|
||||
RemoveTemporalFiles();
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(IRFrontendExtensionTests, custom_ops_test_with_framework_node_extension) {
|
||||
static std::string customOpsModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="customOp1" id="1" type="testtype" version="testopset">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>2</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>2</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="2" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
<edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
std::shared_ptr<ov::Model> model;
|
||||
auto extension = std::make_shared<ov::OpExtension<ov::op::util::FrameworkNode>>();
|
||||
|
||||
core.add_extension(extension);
|
||||
|
||||
ASSERT_NO_THROW(model = core.read_model(customOpsModel, ov::Tensor()));
|
||||
ASSERT_TRUE(!!model);
|
||||
}
|
||||
|
||||
TEST_F(IRFrontendExtensionTests, custom_ops_test_without_extension) {
|
||||
static std::string customOpsModel = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
<layer name="input" type="Parameter" id="0" version="opset1">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="customOp1" id="1" type="testtype" version="testopset">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>2</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>2</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="2" version="opset1">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
<edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
std::shared_ptr<ov::Model> model;
|
||||
|
||||
ASSERT_THROW(model = core.read_model(customOpsModel, ov::Tensor()), ov::Exception);
|
||||
ASSERT_TRUE(!model);
|
||||
}
|
@@ -4,22 +4,18 @@
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <inference_engine.hpp>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include "openvino/frontend/manager.hpp"
|
||||
|
||||
#include "common_test_utils/graph_comparator.hpp"
|
||||
#include "ie_blob.h"
|
||||
#include "ngraph/op/parameter.hpp"
|
||||
#include "ngraph/type/element_type.hpp"
|
||||
#include "ngraph/variant.hpp"
|
||||
#include "openvino/core/preprocess/input_tensor_info.hpp"
|
||||
#include "openvino/frontend/manager.hpp"
|
||||
#include "openvino/opsets/opset8.hpp"
|
||||
#include "openvino/runtime/core.hpp"
|
||||
|
||||
class PartialShapeDeserialization : public testing::Test {
|
||||
protected:
|
||||
std::shared_ptr<ngraph::Function> getWithIRFrontend(const std::string& model) {
|
||||
std::shared_ptr<ov::Model> getWithIRFrontend(const std::string& model) {
|
||||
std::istringstream modelStringStream(model);
|
||||
std::istream& modelStream = modelStringStream;
|
||||
|
||||
@ -42,7 +38,7 @@ private:
|
||||
ov::frontend::FrontEndManager manager;
|
||||
};
|
||||
|
||||
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestCase1) {
|
||||
TEST_F(PartialShapeDeserialization, shape_with_boundaries_test_case1) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -124,7 +120,7 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestCase1) {
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestCase2) {
|
||||
TEST_F(PartialShapeDeserialization, shape_with_boundaries_testC_case2) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -206,7 +202,7 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestCase2) {
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicRank) {
|
||||
TEST_F(PartialShapeDeserialization, shape_with_boundaries_test_dynamic_rank) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -263,17 +259,16 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicRank) {
|
||||
f_11_ref->set_friendly_name("Network");
|
||||
|
||||
const auto fc = FunctionsComparator::with_default()
|
||||
.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::CONST_VALUES);
|
||||
.enable(FunctionsComparator::ATTRIBUTES)
|
||||
.enable(FunctionsComparator::PRECISIONS)
|
||||
.enable(FunctionsComparator::RUNTIME_KEYS)
|
||||
.enable(FunctionsComparator::NAMES)
|
||||
.enable(FunctionsComparator::CONST_VALUES);
|
||||
auto res = fc.compare(f, f_11_ref);
|
||||
EXPECT_TRUE(res.valid) << res.message;
|
||||
}
|
||||
|
||||
|
||||
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicRankNegative) {
|
||||
TEST_F(PartialShapeDeserialization, shape_with_boundaries_test_dynamic_rank_negative) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -297,10 +292,10 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicRankNegative)
|
||||
</net>
|
||||
)V0G0N";
|
||||
// TODO: change to ov::Exception (69781)
|
||||
ASSERT_ANY_THROW(getWithIRFrontend(model));
|
||||
ASSERT_THROW(getWithIRFrontend(model), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicDimNegative) {
|
||||
TEST_F(PartialShapeDeserialization, shape_with_boundaries_test_dynamic_dim_negative) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -324,10 +319,10 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicDimNegative) {
|
||||
</net>
|
||||
)V0G0N";
|
||||
// TODO: change to ov::Exception (69781)
|
||||
ASSERT_ANY_THROW(getWithIRFrontend(model));
|
||||
ASSERT_THROW(getWithIRFrontend(model), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongDim) {
|
||||
TEST_F(PartialShapeDeserialization, shape_with_boundaries_test_wrong_dim) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -351,10 +346,10 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongDim) {
|
||||
</net>
|
||||
)V0G0N";
|
||||
// TODO: change to ov::Exception (69781)
|
||||
ASSERT_ANY_THROW(getWithIRFrontend(model));
|
||||
ASSERT_THROW(getWithIRFrontend(model), ov::Exception);
|
||||
}
|
||||
|
||||
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongBoundary) {
|
||||
TEST_F(PartialShapeDeserialization, shape_with_boundaries_test_wrong_boundary) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -378,5 +373,5 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongBoundary) {
|
||||
</net>
|
||||
)V0G0N";
|
||||
// TODO: change to ov::Exception (69781)
|
||||
ASSERT_ANY_THROW(getWithIRFrontend(model));
|
||||
ASSERT_THROW(getWithIRFrontend(model), ov::Exception);
|
||||
}
|
src/frontends/ir/tests/pre_processing_deserialization.cpp (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "frontend_test.hpp"

class IRFrontendTestsPreProcessing : public ::testing::Test, public IRFrontendTestsImpl {
protected:
    void SetUp() override {}

    void TearDown() override {
        RemoveTemporalFiles();
    }
};

TEST_F(IRFrontendTestsPreProcessing, pre_processing) {
    std::string xmlModel = R"V0G0N(
<?xml version="1.0" ?>
<net name="Network" version="10">
    <pre-process mean-precision="FP32" reference-layer-name="input">
        <channel id="0">
            <mean offset="0" size="1936"/>
        </channel>
        <channel id="1">
            <mean offset="1936" size="1936"/>
        </channel>
        <channel id="2">
            <mean offset="3872" size="1936"/>
        </channel>
    </pre-process>
    <layers>
        <layer name="input" type="Parameter" id="0" version="opset1">
            <data shape="1,3,22,22" element_type="f32"/>
            <output>
                <port id="0" precision="FP32">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </output>
        </layer>
        <layer name="output" type="Result" id="1" version="opset1">
            <input>
                <port id="0" precision="FP32">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </input>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
    </edges>
</net>
)V0G0N";

    int dataSizeinFloat = 22 * 22 * 3;
    std::vector<unsigned char> buffer(dataSizeinFloat * sizeof(float), 0);
    float* floatBuffer = reinterpret_cast<float*>(buffer.data());
    for (int i = 0; i < dataSizeinFloat; i++) {
        floatBuffer[i] = 1;
    }

    createTemporalModelFile(xmlModel, buffer);

    std::shared_ptr<ov::Model> model;

    ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName));
    ASSERT_TRUE(!!model);
}
@@ -4,36 +4,22 @@
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <memory>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
|
||||
#include <inference_engine.hpp>
|
||||
#include <memory>
|
||||
#include <ngraph/opsets/opset8.hpp>
|
||||
#include <string>
|
||||
#include <transformations/rt_info/fused_names_attribute.hpp>
|
||||
#include <transformations/rt_info/old_api_map_order_attribute.hpp>
|
||||
#include <transformations/rt_info/old_api_map_element_type_attribute.hpp>
|
||||
#include "openvino/frontend/manager.hpp"
|
||||
#include "common_test_utils/graph_comparator.hpp"
|
||||
#include "ie_blob.h"
|
||||
#include "ie_precision.hpp"
|
||||
#include "ngraph/node.hpp"
|
||||
#include "ngraph/op/parameter.hpp"
|
||||
#include "ngraph/pass/pass.hpp"
|
||||
#include "ngraph/shape.hpp"
|
||||
#include "ngraph/type/element_type.hpp"
|
||||
#include "ngraph/variant.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "openvino/runtime/core.hpp"
|
||||
#include "ie/ie_core.hpp"
|
||||
#include "openvino/core/preprocess/input_tensor_info.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
#include "openvino/frontend/manager.hpp"
|
||||
#include "openvino/runtime/core.hpp"
|
||||
#include "transformations/rt_info/fused_names_attribute.hpp"
|
||||
#include "transformations/rt_info/old_api_map_element_type_attribute.hpp"
|
||||
#include "transformations/rt_info/old_api_map_order_attribute.hpp"
|
||||
|
||||
class RTInfoDeserialization : public testing::Test {
|
||||
protected:
|
||||
std::shared_ptr<ngraph::Function> getWithIRFrontend(const std::string& model) {
|
||||
std::shared_ptr<ov::Model> getWithIRFrontend(const std::string& model) {
|
||||
std::istringstream modelStringStream(model);
|
||||
std::istream& modelStream = modelStringStream;
|
||||
|
||||
@ -56,7 +42,7 @@ private:
|
||||
ov::frontend::FrontEndManager manager;
|
||||
};
|
||||
|
||||
TEST_F(RTInfoDeserialization, NodeV10) {
|
||||
TEST_F(RTInfoDeserialization, node_v10) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="10">
|
||||
<layers>
|
||||
@ -121,9 +107,8 @@ TEST_F(RTInfoDeserialization, NodeV10) {
|
||||
auto f = getWithIRFrontend(model);
|
||||
ASSERT_NE(nullptr, f);
|
||||
|
||||
auto check_rt_info = [](const RTMap& info) {
|
||||
const std::string& key = ngraph::FusedNames::get_type_info_static();
|
||||
EXPECT_FALSE(info.count(key));
|
||||
auto check_rt_info = [](const ov::RTMap& info) {
|
||||
EXPECT_FALSE(info.count(ngraph::FusedNames::get_type_info_static()));
|
||||
|
||||
const std::string& key_old_api_order = ov::OldApiMapOrder::get_type_info_static();
|
||||
EXPECT_FALSE(info.count(key_old_api_order));
|
||||
@ -161,28 +146,25 @@ TEST_F(RTInfoDeserialization, NodeV10) {
|
||||
|
||||
// read IR v10 with new API and check that CNNNetwork precision conversions are applied
|
||||
{
|
||||
ngraph::Shape shape{1, 3, 22, 22};
|
||||
auto type = ngraph::element::f32;
|
||||
auto param = std::make_shared<ngraph::opset8::Parameter>(type, shape);
|
||||
ov::Shape shape{1, 3, 22, 22};
|
||||
auto type = ov::element::f32;
|
||||
auto param = std::make_shared<ov::opset8::Parameter>(type, shape);
|
||||
param->set_friendly_name("in1");
|
||||
param->get_output_tensor(0).set_names({"input_tensor", param->get_friendly_name()});
|
||||
|
||||
// TODO: No guarantee that exactly 'Convert' will be added
|
||||
auto convert_param = std::make_shared<opset8::Convert>(param, ngraph::element::f16);
|
||||
auto convert_param = std::make_shared<ov::opset8::Convert>(param, ov::element::f16);
|
||||
|
||||
auto round = std::make_shared<opset8::Round>(convert_param,
|
||||
ngraph::opset8::Round::RoundMode::HALF_TO_EVEN);
|
||||
auto round = std::make_shared<ov::opset8::Round>(convert_param, ov::opset8::Round::RoundMode::HALF_TO_EVEN);
|
||||
|
||||
auto convert_result = std::make_shared<opset8::Convert>(round, type);
|
||||
auto convert_result = std::make_shared<ov::opset8::Convert>(round, type);
|
||||
convert_result->set_friendly_name("Round");
|
||||
convert_result->get_output_tensor(0).set_names({"output_tensor",
|
||||
convert_result->get_friendly_name()});
|
||||
convert_result->get_output_tensor(0).set_names({"output_tensor", convert_result->get_friendly_name()});
|
||||
|
||||
auto result = std::make_shared<opset8::Result>(convert_result);
|
||||
auto result = std::make_shared<ov::opset8::Result>(convert_result);
|
||||
result->set_friendly_name("output");
|
||||
|
||||
auto f_10_ref =
|
||||
std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
|
||||
auto f_10_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
|
||||
f_10_ref->set_friendly_name("Network");
|
||||
|
||||
ov::Core core;
|
||||
@ -202,7 +184,7 @@ TEST_F(RTInfoDeserialization, NodeV10) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(RTInfoDeserialization, NamesCollisionV10) {
|
||||
TEST_F(RTInfoDeserialization, names_collision_v10) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="10">
|
||||
<layers>
|
||||
@ -285,14 +267,14 @@ TEST_F(RTInfoDeserialization, NamesCollisionV10) {
|
||||
EXPECT_TRUE(res.first) << res.second;
|
||||
}
|
||||
|
||||
// read IR v10 with new API and check that CNNNetwork precision conversions are applied
|
||||
// read IR v10 with new API
|
||||
{
|
||||
ov::Core core;
|
||||
EXPECT_THROW(core.read_model(model, ov::Tensor()), ov::Exception);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(RTInfoDeserialization, InputAndOutputV10) {
|
||||
TEST_F(RTInfoDeserialization, input_and_output_v10) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="10">
|
||||
<layers>
|
||||
@ -369,9 +351,8 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
|
||||
auto f = getWithIRFrontend(model);
|
||||
ASSERT_NE(nullptr, f);
|
||||
|
||||
auto check_rt_info = [](const RTMap& info) {
|
||||
const std::string& key = ngraph::FusedNames::get_type_info_static();
|
||||
ASSERT_FALSE(info.count(key));
|
||||
auto check_rt_info = [](const ov::RTMap& info) {
|
||||
ASSERT_FALSE(info.count(ngraph::FusedNames::get_type_info_static()));
|
||||
};
|
||||
|
||||
auto check_version = [](const std::shared_ptr<ov::Model>& f, int ref_version) {
|
||||
@ -410,24 +391,23 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
|
||||
|
||||
// read IR v10 with new API and check that CNNNetwork precision conversions are applied
|
||||
{
|
||||
const ngraph::Shape shape{1, 3, 22, 22};
|
||||
const auto type = ngraph::element::i64;
|
||||
auto param = std::make_shared<ngraph::opset8::Parameter>(type, shape);
|
||||
const ov::Shape shape{1, 3, 22, 22};
|
||||
const auto type = ov::element::i64;
|
||||
auto param = std::make_shared<ov::opset8::Parameter>(type, shape);
|
||||
param->set_friendly_name("in1");
|
||||
param->get_output_tensor(0).set_names({"input_tensor", param->get_friendly_name()});
|
||||
|
||||
auto sum = std::make_shared<opset8::Add>(param, param);
|
||||
auto sum = std::make_shared<ov::opset8::Add>(param, param);
|
||||
|
||||
// TODO: No guarantee that exactly 'convert' will be added by post-processing
|
||||
auto convert_result = std::make_shared<opset8::Convert>(sum, ngraph::element::i32);
|
||||
auto convert_result = std::make_shared<ov::opset8::Convert>(sum, ov::element::i32);
|
||||
convert_result->set_friendly_name("sum");
|
||||
convert_result->get_output_tensor(0).set_names({"output_tensor", convert_result->get_friendly_name()});
|
||||
|
||||
auto result = std::make_shared<opset8::Result>(convert_result);
|
||||
auto result = std::make_shared<ov::opset8::Result>(convert_result);
|
||||
result->set_friendly_name("output");
|
||||
|
||||
auto f_10_ref =
|
||||
std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
|
||||
auto f_10_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
|
||||
f_10_ref->set_friendly_name("Network");
|
||||
|
||||
ov::Core core;
|
||||
@ -446,7 +426,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(RTInfoDeserialization, NodeV11) {
|
||||
TEST_F(RTInfoDeserialization, node_v11) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="11">
|
||||
<layers>
|
||||
@ -517,21 +497,19 @@ TEST_F(RTInfoDeserialization, NodeV11) {
|
||||
auto f = getWithIRFrontend(model);
|
||||
ASSERT_NE(nullptr, f);
|
||||
|
||||
auto check_fused_names = [](const RTMap& info, const std::string& names) {
|
||||
const std::string& key = ngraph::FusedNames::get_type_info_static();
|
||||
ASSERT_TRUE(info.count(key));
|
||||
auto fused_names_attr = info.at(key).as<ngraph::FusedNames>();
|
||||
auto check_fused_names = [](const ov::RTMap& info, const std::string& names) {
|
||||
ASSERT_TRUE(info.count(ngraph::FusedNames::get_type_info_static()));
|
||||
auto fused_names_attr = info.at(ngraph::FusedNames::get_type_info_static()).as<ngraph::FusedNames>();
|
||||
EXPECT_EQ(fused_names_attr.getNames(), names);
|
||||
};
|
||||
|
||||
auto check_old_api_map_order = [](const RTMap & info, const std::vector<uint64_t> & order) {
|
||||
const std::string & old_api_map_key = ov::OldApiMapOrder::get_type_info_static();
|
||||
auto check_old_api_map_order = [](const ov::RTMap& info, const std::vector<uint64_t>& order) {
|
||||
const std::string& old_api_map_key = ov::OldApiMapOrder::get_type_info_static();
|
||||
ASSERT_TRUE(info.count(old_api_map_key));
|
||||
auto old_api_map_attr_val = info.at(old_api_map_key).as<ov::OldApiMapOrder>().value;
|
||||
EXPECT_EQ(old_api_map_attr_val, order);
|
||||
};
|
||||
auto check_old_api_map_type = [](const RTMap & info, const ngraph::element::Type& type) {
|
||||
const std::string & old_api_map_key = ov::OldApiMapElementType::get_type_info_static();
|
||||
auto check_old_api_map_type = [](const ov::RTMap& info, const ov::element::Type& type) {
|
||||
const std::string& old_api_map_key = ov::OldApiMapElementType::get_type_info_static();
|
||||
ASSERT_TRUE(info.count(old_api_map_key));
|
||||
auto old_api_map_attr_val = info.at(old_api_map_key).as<ov::OldApiMapElementType>().value;
|
||||
EXPECT_EQ(old_api_map_attr_val, type);
|
||||
@ -548,7 +526,7 @@ TEST_F(RTInfoDeserialization, NodeV11) {
auto param = f->get_parameters()[0];
check_fused_names(param->get_rt_info(), "in1");
check_old_api_map_order(param->get_rt_info(), std::vector<uint64_t>({0, 2, 3, 1}));
check_old_api_map_type(param->get_rt_info(), ngraph::element::Type_t::f16);
check_old_api_map_type(param->get_rt_info(), ov::element::Type_t::f16);

auto result = f->get_result();
check_old_api_map_order(result->get_rt_info(), std::vector<uint64_t>({0, 3, 1, 2}));
@ -562,7 +540,7 @@ TEST_F(RTInfoDeserialization, NodeV11) {
ASSERT_NE(nullptr, f_11);

check_old_api_map_order(f_11->get_parameters()[0]->get_rt_info(), std::vector<uint64_t>({0, 2, 3, 1}));
check_old_api_map_type(f_11->get_parameters()[0]->get_rt_info(), ngraph::element::Type_t::f16);
check_old_api_map_type(f_11->get_parameters()[0]->get_rt_info(), ov::element::Type_t::f16);

check_old_api_map_order(f_11->get_result()->get_rt_info(), std::vector<uint64_t>({0, 3, 1, 2}));

@ -574,40 +552,36 @@ TEST_F(RTInfoDeserialization, NodeV11) {

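// The block below builds the reference graph for reading a v11 IR through the old
// (v10 / InferenceEngine) API: the old_api_map attributes on the Parameter and Result are
// expected to be materialized as an extra Transpose (plus a Convert for the element type)
// around the original Round, which is exactly the graph constructed here and later compared
// against the converted model with FunctionsComparator.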
// read IR v11 with old API and check that old_api_map is applied
{
const ngraph::PartialShape shape{1, 3, 22, 22};
auto type = ngraph::element::f16;
auto param = std::make_shared<ngraph::opset8::Parameter>(type, shape);
const ov::PartialShape shape{1, 3, 22, 22};
auto type = ov::element::f16;
auto param = std::make_shared<ov::opset8::Parameter>(type, shape);
param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor"});

// TODO: No guarantee that Transpose will use exactly 'uint64_t' constant
auto constant_param = std::make_shared<opset8::Constant>(ngraph::element::u64,
ngraph::Shape{4},
std::vector<uint64_t>{0, 2, 3, 1});
auto transpose_param = std::make_shared<opset8::Transpose>(param, constant_param);
auto constant_param =
std::make_shared<ov::opset8::Constant>(ov::element::u64, ov::Shape{4}, std::vector<uint64_t>{0, 2, 3, 1});
auto transpose_param = std::make_shared<ov::opset8::Transpose>(param, constant_param);

// TODO: No guarantee that only 'convert' will be added by implicit pre-processing
auto convert_param = std::make_shared<opset8::Convert>(transpose_param, ngraph::element::f32);
auto convert_param = std::make_shared<ov::opset8::Convert>(transpose_param, ov::element::f32);

auto round = std::make_shared<opset8::Round>(convert_param,
ngraph::opset8::Round::RoundMode::HALF_TO_EVEN);
auto round = std::make_shared<ov::opset8::Round>(convert_param, ov::opset8::Round::RoundMode::HALF_TO_EVEN);
// TODO: runtime information should migrate as well?
round->get_rt_info()[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("Round1,Round2");

// TODO: No guarantee that exactly 'convert, then transpose' will be added by implicit post-processing
auto constant_result = std::make_shared<opset8::Constant>(ngraph::element::u64,
ngraph::Shape{4},
std::vector<uint64_t>{0, 3, 1, 2});
auto transpose_result = std::make_shared<opset8::Transpose>(round, constant_result);
auto constant_result =
std::make_shared<ov::opset8::Constant>(ov::element::u64, ov::Shape{4}, std::vector<uint64_t>{0, 3, 1, 2});
auto transpose_result = std::make_shared<ov::opset8::Transpose>(round, constant_result);

transpose_result->set_friendly_name("Round");
transpose_result->get_output_tensor(0).set_names({"output_tensor"});

auto result = std::make_shared<opset8::Result>(transpose_result);
auto result = std::make_shared<ov::opset8::Result>(transpose_result);
result->set_friendly_name("output");

auto f_10_ref =
std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
auto f_10_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
f_10_ref->set_friendly_name("Network");

InferenceEngine::Core core;
@ -637,10 +611,10 @@ TEST_F(RTInfoDeserialization, NodeV11) {
EXPECT_EQ(shape, f_10_core->get_output_partial_shape(0));

// check that old api map is removed once applied
auto check_old_api_rt_info = [](const RTMap & info) {
const std::string & key_order = ov::OldApiMapOrder::get_type_info_static();
auto check_old_api_rt_info = [](const ov::RTMap& info) {
const std::string& key_order = ov::OldApiMapOrder::get_type_info_static();
EXPECT_EQ(0, info.count(key_order));
const std::string & key_type = ov::OldApiMapElementType::get_type_info_static();
const std::string& key_type = ov::OldApiMapElementType::get_type_info_static();
EXPECT_EQ(0, info.count(key_type));
};

@ -655,8 +629,7 @@ TEST_F(RTInfoDeserialization, NodeV11) {
}
}

TEST_F(RTInfoDeserialization, NodeV11_uint8) {
TEST_F(RTInfoDeserialization, node_v11_uint8) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
@ -722,34 +695,29 @@ TEST_F(RTInfoDeserialization, NodeV11_uint8) {
ASSERT_NE(nullptr, f);

// read IR v11 with old API and check that old_api_map is applied

const ngraph::PartialShape shape{1, 3, 22, 22};
auto type = ngraph::element::f16;
auto param = std::make_shared<ngraph::opset8::Parameter>(type, shape);
const ov::PartialShape shape{1, 3, 22, 22};
auto type = ov::element::f16;
auto param = std::make_shared<ov::opset8::Parameter>(type, shape);
param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor"});

auto constant_param = std::make_shared<opset8::Constant>(ngraph::element::u64,
ngraph::Shape{4},
std::vector<uint64_t>{0, 2, 3, 1});
auto transpose_param = std::make_shared<opset8::Transpose>(param, constant_param);
auto constant_param =
std::make_shared<ov::opset8::Constant>(ov::element::u64, ov::Shape{4}, std::vector<uint64_t>{0, 2, 3, 1});
auto transpose_param = std::make_shared<ov::opset8::Transpose>(param, constant_param);

auto round = std::make_shared<opset8::Round>(transpose_param,
ngraph::opset8::Round::RoundMode::HALF_TO_EVEN);
auto round = std::make_shared<ov::opset8::Round>(transpose_param, ov::opset8::Round::RoundMode::HALF_TO_EVEN);
round->get_rt_info()[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("Round1,Round2");
auto constant_result = std::make_shared<opset8::Constant>(ngraph::element::u64,
ngraph::Shape{4},
std::vector<uint64_t>{0, 3, 1, 2});
auto transpose_result = std::make_shared<opset8::Transpose>(round, constant_result);
auto constant_result =
std::make_shared<ov::opset8::Constant>(ov::element::u64, ov::Shape{4}, std::vector<uint64_t>{0, 3, 1, 2});
auto transpose_result = std::make_shared<ov::opset8::Transpose>(round, constant_result);

transpose_result->set_friendly_name("Round");
transpose_result->get_output_tensor(0).set_names({"output_tensor"});

auto result = std::make_shared<opset8::Result>(transpose_result);
auto result = std::make_shared<ov::opset8::Result>(transpose_result);
result->set_friendly_name("output");

auto f_10_ref =
std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
auto f_10_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
f_10_ref->set_friendly_name("Network");

InferenceEngine::Core core;
@ -763,11 +731,11 @@ TEST_F(RTInfoDeserialization, NodeV11_uint8) {
EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getOutputsInfo()["Round"]->getPrecision());

const auto fc = FunctionsComparator::with_default()
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::CONST_VALUES);
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::CONST_VALUES);
auto res = fc.compare(f_10_core, f_10_ref);
EXPECT_TRUE(res.valid) << res.message;

@ -777,10 +745,10 @@ TEST_F(RTInfoDeserialization, NodeV11_uint8) {
EXPECT_EQ(shape, f_10_core->get_output_partial_shape(0));

// check that old api map is removed once applied
auto check_old_api_rt_info = [](const RTMap & info) {
const std::string & key_order = ov::OldApiMapOrder::get_type_info_static();
auto check_old_api_rt_info = [](const ov::RTMap& info) {
const std::string& key_order = ov::OldApiMapOrder::get_type_info_static();
EXPECT_EQ(0, info.count(key_order));
const std::string & key_type = ov::OldApiMapElementType::get_type_info_static();
const std::string& key_type = ov::OldApiMapElementType::get_type_info_static();
EXPECT_EQ(0, info.count(key_type));
};

@ -789,13 +757,12 @@ TEST_F(RTInfoDeserialization, NodeV11_uint8) {

// check information about layout
EXPECT_TRUE(f_10_core->get_parameters()[0]->get_layout().empty())
<< f_10_core->get_parameters()[0]->get_layout().to_string();
<< f_10_core->get_parameters()[0]->get_layout().to_string();
EXPECT_TRUE(f_10_core->get_results()[0]->get_layout().empty())
<< f_10_core->get_results()[0]->get_layout().to_string();
<< f_10_core->get_results()[0]->get_layout().to_string();
}

TEST_F(RTInfoDeserialization, NodeV11MultipleRTKeys) {
TEST_F(RTInfoDeserialization, node_v11_multiple_rt_keys) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
@ -861,10 +828,10 @@ TEST_F(RTInfoDeserialization, NodeV11MultipleRTKeys) {
</edges>
</net>
)V0G0N";
ASSERT_ANY_THROW(getWithIRFrontend(model));
ASSERT_THROW(getWithIRFrontend(model), ov::Exception);
}

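// The hunk above replaces ASSERT_ANY_THROW with ASSERT_THROW and a concrete exception type,
// so the test now fails if a different exception class is raised instead of passing silently.
// A minimal sketch of the same idea through the public API; the malformed IR string and the
// expectation that ov::Core reports it as ov::Exception are assumptions, not taken from this test.
ov::Core core;
std::string broken_ir = "<net name=\"Broken\" version=\"11\"></net>";
EXPECT_THROW(core.read_model(broken_ir, ov::Tensor()), ov::Exception);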
TEST_F(RTInfoDeserialization, InputAndOutputV11) {
TEST_F(RTInfoDeserialization, input_and_output_v11) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
@ -950,20 +917,21 @@ TEST_F(RTInfoDeserialization, InputAndOutputV11) {
};
check_version(f, 11);

auto check_fused_names = [](const RTMap& info, const std::string& names) {
auto check_fused_names = [](const ov::RTMap& info, const std::string& names) {
const std::string& key = ngraph::FusedNames::get_type_info_static();
ASSERT_TRUE(info.count(key));
auto fused_names_attr = info.at(key).as<ngraph::FusedNames>();
ASSERT_EQ(fused_names_attr.getNames(), names);
};


auto param = f->get_parameters()[0];
check_fused_names(param->output(0).get_rt_info(), "test1,test2");
EXPECT_EQ(param->get_layout(), "NCHW");
auto var0 = f->input(0).get_rt_info()
.at(ov::preprocess::TensorInfoMemoryType::get_type_info_static())
.as<ov::preprocess::TensorInfoMemoryType>().value;
auto var0 = f->input(0)
.get_rt_info()
.at(ov::preprocess::TensorInfoMemoryType::get_type_info_static())
.as<ov::preprocess::TensorInfoMemoryType>()
.value;
EXPECT_EQ(var0, "test_memory_type");

auto result = f->get_result();
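// For reference: the memory-type entry read a few lines above is attached by producers as a
// plain rt_info attribute on the model input. A sketch of the writing side; the string
// constructor of TensorInfoMemoryType is an assumption here, not something this test exercises.
f->input(0).get_rt_info()[ov::preprocess::TensorInfoMemoryType::get_type_info_static()] =
    ov::preprocess::TensorInfoMemoryType("test_memory_type");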
@ -986,7 +954,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV11) {
EXPECT_EQ(InferenceEngine::Precision::FP32, cnn.getOutputsInfo()["sum"]->getPrecision());

// check that old api map is removed once applied
auto check_old_api_rt_info = [](const RTMap& info) {
auto check_old_api_rt_info = [](const ov::RTMap& info) {
const std::string& key_type = ov::OldApiMapElementType::get_type_info_static();
EXPECT_FALSE(info.count(key_type));
const std::string& key_order = ov::OldApiMapElementType::get_type_info_static();
@ -1003,7 +971,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV11) {
}
}

TEST_F(RTInfoDeserialization, IndexesInputAndOutputV11) {
TEST_F(RTInfoDeserialization, indexes_input_and_output_v11) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
@ -1121,7 +1089,7 @@ TEST_F(RTInfoDeserialization, IndexesInputAndOutputV11) {
ASSERT_EQ(f->get_results()[1]->get_friendly_name(), "output1");
}

TEST_F(RTInfoDeserialization, V11toV10WithoutRTInfo) {
TEST_F(RTInfoDeserialization, v11_to_v10_without_rt_info) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
@ -1208,4 +1176,4 @@ TEST_F(RTInfoDeserialization, V11toV10WithoutRTInfo) {
ASSERT_NE(nullptr, f_10);

check_version(f_10, 10);
}
}
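Several tests in this file call a check_version(f, N) helper defined near the top of the file. For orientation, a plausible shape for such a helper, assuming the IR frontend stores the parsed IR version under the "version" key of the model's rt_info (the exact helper in the test file may differ):

void check_version(const std::shared_ptr<ov::Model>& model, int64_t ref_version) {
    auto& rt_info = model->get_rt_info();
    ASSERT_TRUE(rt_info.count("version"));
    EXPECT_EQ(rt_info.at("version").as<int64_t>(), ref_version);
}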
252
src/frontends/ir/tests/tensor_iterator_deserialization.cpp
Normal file
@ -0,0 +1,252 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "frontend_test.hpp"
#include "openvino/opsets/opset1.hpp"
#include "openvino/opsets/opset8.hpp"

class IRFrontendTestsTensorIterator : public ::testing::Test, public IRFrontendTestsImpl {
protected:
void SetUp() override {}

void TearDown() override {
RemoveTemporalFiles();
}
};

TEST_F(IRFrontendTestsTensorIterator, tensor_iterator_merged_input) {
std::string testModel = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer id="0" name="Parameter1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,2,3"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1" name="TensorIterator" type="TensorIterator" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</output>
<port_map>
<input external_port_id="0" internal_layer_id="0"/>
<output external_port_id="1" internal_layer_id="1"/>
</port_map>
<back_edges>
<edge from-layer="1" to-layer="0"/>
</back_edges>
<body>
<layers>
<layer id="0" name="internalParameter1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,2,3"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1" name="internalResult1" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
</edges>
</body>
</layer>
<layer id="2" name="Result1" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
</edges>
</net>
)V0G0N";

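// How the IR above maps to the reference built below: the <port_map> entries without an
// "axis" attribute describe whole-tensor (invariant) connections, and the <back_edges> edge
// from internal layer 1 (Result) back to internal layer 0 (Parameter) is what
// set_merged_input() reproduces: the body Parameter is fed by the outer Parameter1 on the
// first iteration and by internalResult1 on every following one, while
// get_iter_value(result1, -1) exposes the value from the last iteration as the
// TensorIterator output.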
std::shared_ptr<ov::Model> model;

ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor()));
ASSERT_TRUE(!!model);

std::shared_ptr<ov::Model> modelRef;
{
auto parameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 2, 3});
parameter->set_friendly_name("Parameter1");
auto tensor_iterator = std::make_shared<ov::opset8::TensorIterator>();

std::shared_ptr<ov::Model> body;
auto internalParameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 2, 3});
internalParameter->set_friendly_name("internalParameter1");
auto result1 = std::make_shared<ov::opset1::Result>(internalParameter);
result1->set_friendly_name("internalResult1");
body = std::make_shared<ov::Model>(ov::NodeVector{result1}, ov::ParameterVector{internalParameter});
tensor_iterator->set_body(body);
tensor_iterator->set_friendly_name("TensorIterator");
tensor_iterator->set_merged_input(internalParameter, parameter, result1);
auto out0 = tensor_iterator->get_iter_value(result1, -1);

auto result = std::make_shared<ov::opset1::Result>(tensor_iterator->output(0));
result->set_friendly_name("Result1");

modelRef = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{parameter});
}

const auto fc = FunctionsComparator::with_default()
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::CONST_VALUES);
const auto res = fc.compare(model, modelRef);
EXPECT_TRUE(res.valid) << res.message;
}

TEST_F(IRFrontendTestsTensorIterator, tensor_iterator_slised_input) {
std::string testModel = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer id="0" name="Parameter1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,2,3"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1" name="TensorIterator" type="TensorIterator" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</output>
<port_map>
<input axis="2" external_port_id="0" internal_layer_id="0" part_size="1" stride="1"/>
<output axis="2" external_port_id="1" internal_layer_id="1" part_size="1" stride="1"/>
</port_map>
<back_edges>
<edge from-layer="1" to-layer="0"/>
</back_edges>
<body>
<layers>
<layer id="0" name="internalParameter1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,2,1"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1" name="internalResult1" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
</edges>
</body>
</layer>
<layer id="2" name="Result1" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>3</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
</edges>
</net>
)V0G0N";

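// How this IR maps to the reference below: the <port_map> entries carry axis="2",
// part_size="1" and stride="1", i.e. the outer input is cut into slices of size 1 along
// axis 2 and the body output is concatenated back along the same axis. That corresponds to
// set_sliced_input(internalParameter, parameter, start, stride, part_size, end, axis) and
// get_concatenated_slices(result1, start, stride, part_size, end, axis) with start = 0,
// stride = 1, part_size = 1, end = -1 (up to the last slice) and axis = 2, which are exactly
// the arguments used below.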
std::shared_ptr<ov::Model> model;

ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor()));
ASSERT_TRUE(!!model);

std::shared_ptr<ov::Model> modelRef;
{
auto parameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 2, 3});
parameter->set_friendly_name("Parameter1");
auto tensor_iterator = std::make_shared<ov::opset8::TensorIterator>();

std::shared_ptr<ov::Model> body;
auto internalParameter = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 2, 3});
internalParameter->set_friendly_name("internalParameter1");
auto result1 = std::make_shared<ov::opset1::Result>(internalParameter);
result1->set_friendly_name("internalResult1");
body = std::make_shared<ov::Model>(ov::NodeVector{result1}, ov::ParameterVector{internalParameter});
tensor_iterator->set_body(body);
tensor_iterator->set_friendly_name("TensorIterator");
tensor_iterator->set_sliced_input(internalParameter, parameter, 0, 1, 1, -1, 2);
auto out0 = tensor_iterator->get_concatenated_slices(result1, 0, 1, 1, -1, 2);

auto result = std::make_shared<ov::opset1::Result>(tensor_iterator->output(0));
result->set_friendly_name("Result1");

modelRef = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{parameter});
}

const auto fc = FunctionsComparator::with_default()
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::CONST_VALUES);
const auto res = fc.compare(model, modelRef);
EXPECT_TRUE(res.valid) << res.message;
}