Upgrade CPU func tests to 2.0 (#21357)
* [CPU Plugin][Func Test] Upgrade AddConvertToReorderTest to API 2.0
* [CPU Plugin][Func Test] Upgrade AlignMatMulInputRanksTest to API 2.0
* [CPU Plugin][Func Test] Upgrade GatherAddAvgpool to API 2.0
* [CPU Plugin][Func Test] Upgrade InputNoReorderEltwiseBF16 to API 2.0
* [CPU Plugin][Func Test] Fix review comments

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>
This commit is contained in:
parent 5dda9f333b
commit 9c94873842
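All four test files below follow the same API 2.0 migration pattern: the fixture base changes from `LayerTestsUtils::LayerTestsCommon` to `ov::test::SubgraphBaseStaticTest`, `Run()` becomes `run()`, `executableNetwork` becomes `compiledModel`, and the legacy `InferenceEngine::Precision` fields (`inPrc`/`outPrc`) become `ov::element::Type` fields (`inType`/`outType`). A minimal sketch of the 2.0-style fixture shape, using a hypothetical test name and a trivial Relu graph in place of the real subgraphs:

```cpp
// Sketch only: ExampleCpuSubgraphTest and its Relu graph are illustrative and not
// part of this PR; the base class and helpers are the ones used in the diffs below.
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {

class ExampleCpuSubgraphTest : virtual public SubgraphBaseStaticTest {
protected:
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;  // API 2.0 device constant
        inType = outType = ov::element::f32;         // replaces inPrc/outPrc

        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
        auto relu = std::make_shared<ov::op::v0::Relu>(param);
        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(relu)};
        function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "example");
    }
};

TEST_F(ExampleCpuSubgraphTest, smoke_Example) {
    run();  // replaces the legacy Run(); results are checked against compiledModel
}

}  // namespace test
}  // namespace ov
```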
@@ -1267,6 +1267,8 @@ bool fuse_type_to_constant(const std::shared_ptr<ov::Node>& node,
            new_const = change_constant_precision<ov::element::Type_t::boolean, ov::element::Type_t::u8>(constant);
        } else if (from == ov::element::boolean && to == ov::element::i32) {
            new_const = change_constant_precision<ov::element::Type_t::boolean, ov::element::Type_t::i32>(constant);
        } else if (from == ov::element::i8 && to == ov::element::i64) {
            new_const = change_constant_precision<ov::element::Type_t::i8, ov::element::Type_t::i64>(constant);
        } else if (from == ov::element::i4 || from == ov::element::u4 || from == ov::element::u1) {
            new_const = convert_low_precisions_int(constant, to);
        } else {
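The new `i8 -> i64` branch above reuses the existing `change_constant_precision` helper, so constants created as `i8` by the updated tests can be widened by the transformation. As a rough standalone illustration of that widening (not the transformation code itself, and with a hypothetical helper name), the public `Constant` API can rebuild an `i8` constant with `i64` storage:

```cpp
#include "openvino/op/constant.hpp"

// Illustrative helper (hypothetical name): rebuild an i8 constant as i64.
std::shared_ptr<ov::op::v0::Constant> widen_i8_to_i64(const std::shared_ptr<ov::op::v0::Constant>& src) {
    // cast_vector<>() reads the stored i8 values and converts them to int64_t
    const auto values = src->cast_vector<int64_t>();
    return ov::op::v0::Constant::create(ov::element::i64, src->get_shape(), values);
}
```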
@@ -2,53 +2,51 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "test_utils/cpu_test_utils.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"

using namespace InferenceEngine;
using namespace CPUTestUtils;

namespace LayerTestsDefinitions {
namespace ov {
namespace test {

class AddConvertToReorderTest : virtual public LayerTestsUtils::LayerTestsCommon {
class AddConvertToReorderTest : virtual public SubgraphBaseStaticTest {
public:
    void BuildGraph(const ngraph::element::Type& secondInpType) {
    void BuildGraph(const ov::element::Type& secondInpType) {
        secondConstantType = secondInpType;
        int axis = 2;
        std::vector<int> indices = {0, 3, 2, 1};
        std::vector<size_t> indicesShape = {2, 2};
        std::vector<size_t> inputShape = {10, 20, 30, 40};

        InferenceEngine::Precision netPrecision = inPrc = outPrc = Precision::FP32;
        ov::element::Type netPrecision = inType = outType = ov::element::f32;
        targetDevice = ov::test::utils::DEVICE_CPU;

        ASSERT_EQ(ngraph::shape_size(indicesShape), indices.size())
            << "Indices vector size and provided indices shape doesn't fit each other";
        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
        auto indicesNode = ngraph::opset3::Constant::create(secondConstantType, ngraph::Shape(indicesShape), indices);
        auto axisNode = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis});
        auto gather = std::make_shared<ngraph::opset3::Gather>(params[0], indicesNode, axisNode);
        ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(gather)};
        function = std::make_shared<ngraph::Function>(results, params, "gather");
        ASSERT_EQ(ov::shape_size(indicesShape), indices.size())
            << "Indices vector size and provided indices shape doesn't fit each other";
        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape(inputShape))};
        auto indicesNode = ov::op::v0::Constant::create(secondConstantType, ov::Shape(indicesShape), indices);
        auto axisNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape({}), {axis});
        auto gather = std::make_shared<ov::op::v1::Gather>(params[0], indicesNode, axisNode);
        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gather)};
        function = std::make_shared<ov::Model>(results, params, "gather");
    }
    std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> CalculateRefs() override {
    std::vector<ov::Tensor> calculate_refs() override {
        // Convert the second input constant precision to i64 to run the reference function
        if (ngraph::element::Type_t::i8 == secondConstantType) {
            ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::i64>().run_on_model(functionRefs);
        } else if (ngraph::element::Type_t::bf16 == secondConstantType) {
            ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::i64>().run_on_model(functionRefs);
        if (ov::element::i8 == secondConstantType) {
            convert_precisions.insert({ov::element::i8, ov::element::i64});
        } else if (ov::element::bf16 == secondConstantType) {
            convert_precisions.insert({ov::element::bf16, ov::element::i64});
        }
        return LayerTestsUtils::LayerTestsCommon::CalculateRefs();
        return SubgraphBaseTest::calculate_refs();
    }

private:
    ngraph::element::Type secondConstantType;
    ov::element::Type secondConstantType;
};

namespace {
namespace {

/* Test insertion of the Reorder layer if there is one.
@@ -63,10 +61,11 @@ namespace {
   Output[FP32]
*/
TEST_F(AddConvertToReorderTest, smoke_TestAddReorder_CPU) {
    BuildGraph(ngraph::element::i8);
    Run();
    CheckNumberOfNodesWithType(executableNetwork, "Convert", 0);
    CheckNumberOfNodesWithType(executableNetwork, "Reorder", 1);
    BuildGraph(ov::element::i8);
    run();
    CheckNumberOfNodesWithType(compiledModel, "Convert", 0);
    CheckNumberOfNodesWithType(compiledModel, "Reorder", 1);
}
} // namespace
} // namespace LayerTestsDefinitions
} // namespace
} // namespace test
} // namespace ov
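For context, the Gather model that `BuildGraph` assembles above can also be compiled and executed directly with the 2.0 runtime API outside the test framework; a minimal sketch (the helper name is illustrative, and `model` stands in for the `function` built above):

```cpp
#include "openvino/runtime/core.hpp"

// `model` is assumed to be an ov::Model like the Gather graph built above.
void run_on_cpu(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;
    ov::CompiledModel compiled = core.compile_model(model, "CPU");   // replaces the legacy LoadNetwork flow
    ov::InferRequest request = compiled.create_infer_request();
    request.infer();  // inputs keep their default-allocated tensors in this sketch
}
```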
@@ -2,35 +2,37 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/fusing_test_utils.hpp"
#include "ov_models/builders.hpp"
#include "common_test_utils/common_utils.hpp"

#include <algorithm>
#include <cassert>

using namespace ngraph;
using namespace InferenceEngine;
#include "common_test_utils/common_utils.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/fusing_test_utils.hpp"

using namespace CPUTestUtils;

namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {

using AlignMatMulInputRanksTestParams = std::tuple<std::pair<SizeVector, SizeVector>, // IS fully connected
                                                   fusingSpecificParams>;
using AlignMatMulInputRanksTestParams = std::tuple<std::pair<ov::Shape, ov::Shape>, // IS fully connected
                                                   fusingSpecificParams>;

class AlignMatMulInputRanksTest : public testing::WithParamInterface<AlignMatMulInputRanksTestParams>, public CpuTestWithFusing,
                                  virtual public LayerTestsUtils::LayerTestsCommon {
class AlignMatMulInputRanksTest : public testing::WithParamInterface<AlignMatMulInputRanksTestParams>,
                                  public CpuTestWithFusing,
                                  virtual public SubgraphBaseStaticTest {
public:
    static std::string getTestCaseName(testing::TestParamInfo<AlignMatMulInputRanksTestParams> obj) {
        std::pair<SizeVector, SizeVector> supportedInputShapes;
        std::pair<ov::Shape, ov::Shape> supportedInputShapes;
        fusingSpecificParams fusingParams;
        std::tie(supportedInputShapes, fusingParams) = obj.param;
        SizeVector inputShapeA = supportedInputShapes.first; SizeVector inputShapeB = supportedInputShapes.second;
        ov::Shape inputShapeA = supportedInputShapes.first;
        ov::Shape inputShapeB = supportedInputShapes.second;

        std::ostringstream result;
        result << "IS_A=" << ov::test::utils::vec2str(inputShapeA) << "_";
        result << "IS_B=" << ov::test::utils::vec2str(inputShapeB) << "_";
        result << "IS_A=" << inputShapeA << "_";
        result << "IS_B=" << inputShapeB << "_";
        result << CpuTestWithFusing::getTestCaseName(fusingParams);

        return result.str();
@@ -39,7 +41,7 @@ public:
protected:
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;
        std::pair<SizeVector, SizeVector> inShapes;
        std::pair<ov::Shape, ov::Shape> inShapes;
        fusingSpecificParams fusingParams;
        std::tie(inShapes, fusingParams) = this->GetParam();

@@ -48,14 +50,14 @@ protected:
        if (inShapes.first.size() == 1 || inShapes.second.size() == 1)
            expectedNumOfReshapes++; // output will be squeezed
        if (inShapes.first.size() == 1 && inShapes.second.size() == 1)
            expectedNumOfReshapes+=2; // both inputs unsqueezed and output squeezed
            expectedNumOfReshapes += 2; // both inputs unsqueezed and output squeezed

        if (inShapes.first.size() != 1 && inShapes.second.size() != 1) // no fusing through Reshape after output
        if (inShapes.first.size() != 1 && inShapes.second.size() != 1)  // no fusing through Reshape after output
            std::tie(postOpMgrPtr, fusedOps) = fusingParams;

        const auto ngPrec = element::f32;
        ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ngPrec, ov::Shape(inShapes.first)),
                                        std::make_shared<ov::op::v0::Parameter>(ngPrec, ov::Shape(inShapes.second))};
        ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ngPrec, inShapes.first),
                                        std::make_shared<ov::op::v0::Parameter>(ngPrec, inShapes.second)};
        const auto matMul = std::make_shared<ov::op::v0::MatMul>(inputParams[0], inputParams[1], false, false);

        selectedType = makeSelectedTypeStr(with_cpu_x86_avx512_core() ? "brgemm_avx512" : "jit_gemm", ngPrec);
@@ -67,18 +69,20 @@ protected:
};

TEST_P(AlignMatMulInputRanksTest, CompareWithRefs) {
    Run();
    CheckNumberOfNodesWithType(executableNetwork, "Reshape", expectedNumOfReshapes); // Squeeze / Unsqueeze turns into Reshape
    CheckPluginRelatedResults(executableNetwork, "MatMul");
    run();
    CheckNumberOfNodesWithType(compiledModel,
                               "Reshape",
                               expectedNumOfReshapes); // Squeeze / Unsqueeze turns into Reshape
    CheckPluginRelatedResults(compiledModel, "MatMul");
}

namespace {

const std::vector<std::pair<SizeVector, SizeVector>> supportedInputShapes = {
    {{4, 10, 5}, {1, 5, 10}}, // nothing to be done
    {{3}, {3}}, // 3x1 * 1x3 -> 1
    {{18}, {1, 5, 18, 20}}, // 1x1x1x18 * 1x5x18x20 -> 1x5x20
    {{2, 3, 4, 4, 4, 10, 5}, {5}}, // 2x3x4x4x4x10x5 * 1x1x1x1x1x5x1 -> 1x1x1x1x1x5
const std::vector<std::pair<ov::Shape, ov::Shape>> supportedInputShapes = {
    {{4, 10, 5}, {1, 5, 10}},       // nothing to be done
    {{3}, {3}},                     // 3x1 * 1x3 -> 1
    {{18}, {1, 5, 18, 20}},         // 1x1x1x18 * 1x5x18x20 -> 1x5x20
    {{2, 3, 4, 4, 4, 10, 5}, {5}},  // 2x3x4x4x4x10x5 * 1x1x1x1x1x5x1 -> 1x1x1x1x1x5
    {{1, 18}, {1, 5, 18, 20}},
    {{1, 70, 18}, {1, 5, 18, 20}},
    {{7, 1, 10, 3, 2, 7}, {1, 7, 5}},
@@ -86,16 +90,18 @@ const std::vector<std::pair<SizeVector, SizeVector>> supportedInputShapes = {
};

// verify fusing just in case
std::vector<fusingSpecificParams> fusingParamsSet {
    emptyFusingSpec,
    fusingElu,
std::vector<fusingSpecificParams> fusingParamsSet{
    emptyFusingSpec,
    fusingElu,
};

INSTANTIATE_TEST_SUITE_P(smoke_Check, AlignMatMulInputRanksTest,
INSTANTIATE_TEST_SUITE_P(smoke_Check,
                         AlignMatMulInputRanksTest,
                         ::testing::Combine(::testing::ValuesIn(supportedInputShapes),
                                            ::testing::ValuesIn(fusingParamsSet)),
                         AlignMatMulInputRanksTest::getTestCaseName);

} // namespace
} // namespace

} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
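The `expectedNumOfReshapes` bookkeeping above encodes the rank-alignment rule being tested: a 1-D MatMul input is unsqueezed, a 1-D-induced output is squeezed, and on CPU those Squeeze/Unsqueeze ops execute as Reshape nodes. A small sketch of just the branch visible in the hunk above (the earlier initialization of `expectedNumOfReshapes` is outside the hunk, and the helper name is illustrative):

```cpp
#include <cstddef>
#include "openvino/core/shape.hpp"

// Illustrative only: mirrors the reshape-counting branch shown in the test above.
size_t expected_reshape_count(const ov::Shape& a, const ov::Shape& b) {
    size_t reshapes = 0;
    if (a.size() == 1 || b.size() == 1)
        reshapes++;     // output will be squeezed
    if (a.size() == 1 && b.size() == 1)
        reshapes += 2;  // both inputs unsqueezed and the output squeezed
    return reshapes;
}
```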
@@ -2,12 +2,13 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/base/layer_test_utils.hpp"
#include <ngraph/opsets/opset8.hpp>
#include <exec_graph_info.hpp>

#include "openvino/runtime/exec_model_info.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {

using namespace ngraph;

@@ -25,30 +26,33 @@ using namespace ngraph;
   input's precision if its child has Subgraph consumers.
   Same scenario happens when we have Eltwise instead of Subgraph - to be addressed in #78939.
*/
class GatherAddAvgpool : virtual public LayerTestsUtils::LayerTestsCommon {
class GatherAddAvgpool : virtual public SubgraphBaseStaticTest {
protected:
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;
        inPrc = InferenceEngine::Precision::U8;
        outPrc = InferenceEngine::Precision::FP32;
        auto type = element::f32;
        auto param = std::make_shared<opset8::Parameter>(type, Shape{1, 3, 64, 64});
        auto gather = std::make_shared<opset8::Gather>(param,
                                                       op::Constant::create(element::i32, Shape{3}, {2, 1, 0}),
                                                       op::Constant::create(element::i32, Shape{1}, {1}));
        auto add = std::make_shared<opset8::Add>(gather, op::Constant::create(type, Shape{1, 3, 1, 1}, {3}));
        auto avgpool = std::make_shared<opset8::AvgPool>(add, Strides{1, 1}, Shape{0, 0}, Shape{0, 0}, Shape{2, 2}, false);
        function = std::make_shared<Function>(avgpool, ParameterVector{param});
        inType = ov::element::u8;
        outType = ov::element::f32;
        auto type = ov::element::f32;
        auto param = std::make_shared<ov::op::v0::Parameter>(type, Shape{1, 3, 64, 64});
        auto gather =
            std::make_shared<ov::op::v8::Gather>(param,
                                                 ov::op::v0::Constant::create(element::i32, Shape{3}, {2, 1, 0}),
                                                 ov::op::v0::Constant::create(element::i32, Shape{1}, {1}));
        auto add =
            std::make_shared<ov::op::v1::Add>(gather, ov::op::v0::Constant::create(type, Shape{1, 3, 1, 1}, {3}));
        auto avgpool =
            std::make_shared<ov::op::v1::AvgPool>(add, Strides{1, 1}, Shape{0, 0}, Shape{0, 0}, Shape{2, 2}, false);
        function = std::make_shared<ov::Model>(avgpool, ov::ParameterVector{param});
    }

    void TearDown() override {
        auto exec_model = executableNetwork.GetExecGraphInfo().getFunction();
        auto exec_model = compiledModel.get_runtime_model();

        int eltwise_nodes_found = 0;
        int pool_nodes_found = 0;
        for (const auto& n : exec_model->get_ordered_ops()) {
            auto layer_type = n->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as<std::string>();
            auto output_layout = n->get_rt_info().at(ExecGraphInfoSerialization::OUTPUT_LAYOUTS).as<std::string>();
            auto layer_type = n->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as<std::string>();
            auto output_layout = n->get_rt_info().at(ov::exec_model_info::OUTPUT_LAYOUTS).as<std::string>();
            if (layer_type == "Subgraph") {
                eltwise_nodes_found++;
                ASSERT_EQ("abcd", output_layout);
@@ -63,7 +67,8 @@ protected:
};

TEST_F(GatherAddAvgpool, smoke_CompareWithRefs) {
    Run();
    run();
}

} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
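The `TearDown` check above walks the executed graph returned by `compiledModel.get_runtime_model()` and reads per-node runtime info keys such as `ov::exec_model_info::LAYER_TYPE`. The same inspection can be written as a small standalone helper (hypothetical name), roughly the pattern that checks like `CheckNumberOfNodesWithType` rely on:

```cpp
#include <string>
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/exec_model_info.hpp"

// Illustrative helper: count executed-graph nodes with a given layer type.
size_t count_exec_nodes_of_type(const ov::CompiledModel& compiled, const std::string& type) {
    size_t count = 0;
    for (const auto& node : compiled.get_runtime_model()->get_ordered_ops()) {
        const auto layer_type = node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as<std::string>();
        if (layer_type == type)
            ++count;
    }
    return count;
}
```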
@@ -2,39 +2,38 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <ov_models/builders.hpp>
#include "ie_common.h"
#include "ov_models/utils/ov_helpers.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "common_test_utils/node_builders/eltwise.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "ov_models/builders.hpp"
#include "ov_models/utils/ov_helpers.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"

using namespace InferenceEngine;
using namespace CPUTestUtils;

namespace CPULayerTestsDefinitions {
namespace ov {
namespace test {

class InputNoReorderEltwiseBF16 : virtual public LayerTestsUtils::LayerTestsCommon,
                                  public CPUTestsBase {
class InputNoReorderEltwiseBF16 : virtual public SubgraphBaseStaticTest, public CPUTestsBase {
protected:
    void SetUp() override {
        auto netPrecision = inPrc = Precision::FP32;
        outPrc = Precision::BF16;
        auto netPrecision = inType = ov::element::f32;
        outType = ov::element::bf16;
        targetDevice = ov::test::utils::DEVICE_CPU;
        std::map<std::string, std::string> additional_config{{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO}};
        ov::AnyMap additional_config{ov::hint::inference_precision(ov::element::bf16)};
        configuration.insert(additional_config.begin(), additional_config.end());

        std::vector<size_t> inputShape {2, 4, 4, 1};
        auto eltwiseType = ngraph::helpers::EltwiseTypes::ADD;
        ov::Shape inputShape{2, 4, 4, 1};
        auto eltwiseType = ov::test::utils::EltwiseTypes::ADD;

        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
        ov::ParameterVector input {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
        ov::ParameterVector input{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputShape)};

        auto tensor = ov::test::utils::create_and_fill_tensor(ngPrc, inputShape);
        auto tensor = ov::test::utils::create_and_fill_tensor(netPrecision, inputShape);
        auto secondaryInput = std::make_shared<ov::op::v0::Constant>(tensor);

        auto eltwise = ngraph::builder::makeEltwise(input[0], secondaryInput, eltwiseType);
        auto eltwise = ov::test::utils::makeEltwise(input[0], secondaryInput, eltwiseType);

        function = makeNgraphFunction(ngPrc, input, eltwise, "Eltwise");
        function = makeNgraphFunction(netPrecision, input, eltwise, "Eltwise");
    }
};

@@ -53,10 +52,11 @@ protected:
   Output[BF16]
*/
TEST_F(InputNoReorderEltwiseBF16, smoke_CompareWithRefs) {
    Run();
    run();

    CheckNumberOfNodesWithType(executableNetwork, "Reorder", 0);
    CheckNumberOfNodesWithType(executableNetwork, "Convert", 0);
    CheckNumberOfNodesWithTypes(executableNetwork, {"Eltwise", "Subgraph"}, 1);
    CheckNumberOfNodesWithType(compiledModel, "Reorder", 0);
    CheckNumberOfNodesWithType(compiledModel, "Convert", 0);
    CheckNumberOfNodesWithTypes(compiledModel, {"Eltwise", "Subgraph"}, 1);
}
} // namespace CPULayerTestsDefinitions
} // namespace test
} // namespace ov
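The bf16 handling above moves from the legacy `KEY_ENFORCE_BF16` string property to the typed `ov::hint::inference_precision` hint inserted into the test's `configuration` map. Outside the test framework, the same hint can be passed directly to `compile_model`; a minimal sketch (helper name illustrative, `model` stands in for any ov::Model such as the Eltwise graph built above):

```cpp
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/properties.hpp"

// Typed property replaces the legacy {KEY_ENFORCE_BF16, YES/NO} string pair.
ov::CompiledModel compile_with_bf16_hint(ov::Core& core, const std::shared_ptr<ov::Model>& model) {
    return core.compile_model(model, "CPU", ov::hint::inference_precision(ov::element::bf16));
}
```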