Merge branch 'master' into itikhono/ts/slice

commit cf70ced2a9
@@ -17,9 +17,9 @@ namespace ngraph {
 namespace pass {
 namespace device {
 
-class ConvertOpSet1ToDeviceSpecific: public ngraph::pass::FunctionPass {
+class ConvertOpSet1ToDeviceSpecific: public ov::pass::ModelPass {
 public:
-    bool run_on_function(std::shared_ptr<ngraph::Function> f) override {
+    bool run_on_model(const std::shared_ptr<ngraph::Function>& f) override {
         return true;
     }
 };
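This first hunk (apparently from a documentation snippet) shows the shape of the whole commit: passes derive from ov::pass::ModelPass instead of ngraph::pass::FunctionPass and override run_on_model(), which takes the model by const reference. A minimal sketch of a pass written directly against the new interface; the class name and body are illustrative, not part of the commit:

#include <memory>

#include "openvino/pass/pass.hpp"

// Illustrative post-migration pass: derive from ov::pass::ModelPass and
// override run_on_model(). The transformation body here is a placeholder.
class MyDevicePass : public ov::pass::ModelPass {
public:
    OPENVINO_RTTI("MyDevicePass");
    bool run_on_model(const std::shared_ptr<ov::Model>& model) override {
        // Return true only if the model was modified.
        return false;
    }
};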
@@ -24,6 +24,11 @@ OV_CC_DOMAINS(ov_pass);
 # define ADD_MATCHER(obj, region, ...) obj->add_matcher<region>(__VA_ARGS__);
 # define REGISTER_PASS(obj, region, ...) obj.register_pass<region>(__VA_ARGS__);
 # define REGISTER_DISABLED_PASS(obj, region, ...) obj.register_pass<region, false>(__VA_ARGS__);
+
+# define OV_PASS_CALLBACK(matcher)                                    \
+     openvino::itt::handle_t m_callback_handle;                       \
+     m_callback_handle = openvino::itt::handle(matcher->get_name());  \
+     OV_ITT_SCOPED_TASK(SIMPLE_ov_pass, m_callback_handle)
 #elif defined(SELECTIVE_BUILD)
 
 # define MATCHER_SCOPE_(scope, region) \
@@ -70,6 +75,7 @@ OV_CC_DOMAINS(ov_pass);
 # define REGISTER_DISABLED_PASS(obj, region, ...)                                                 \
      OV_PP_CAT(REGISTER_PASS_WITH_FALSE_, OV_CC_SCOPE_IS_ENABLED(OV_PP_CAT3(ov_pass, _, region))) \
      (obj, region, __VA_ARGS__)
+# define OV_PASS_CALLBACK(matcher)
 #else
 
 # define MATCHER_SCOPE(region) const std::string matcher_name(OV_PP_TOSTRING(region))
@@ -79,6 +85,7 @@ OV_CC_DOMAINS(ov_pass);
 # define ADD_MATCHER(obj, region, ...) obj->add_matcher<region>(__VA_ARGS__);
 # define REGISTER_PASS(obj, region, ...) obj.register_pass<region>(__VA_ARGS__);
 # define REGISTER_DISABLED_PASS(obj, region, ...) obj.register_pass<region, false>(__VA_ARGS__);
+# define OV_PASS_CALLBACK(matcher)
 #endif
 
 #define ADD_MATCHER_FOR_THIS(region, ...) ADD_MATCHER(this, region, __VA_ARGS__)
@@ -5,7 +5,6 @@
 #include <low_precision/layer_transformation.hpp>
 #include <low_precision/network_helper.hpp>
-
 
 #include <algorithm>
 #include <cmath>
 #include <limits>
@@ -448,9 +447,24 @@ void LayerTransformation::addPattern(ngraph::pass::GraphRewrite& pass, Transform
     };
     // TODO: better name for matcher? required?
     auto m = std::make_shared<ngraph::pattern::Matcher>(patternRoot, matcher_name);
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    pass.add_matcher(m, internal_callback, ngraph::pass::PassProperty::CHANGE_DYNAMIC_STATE);
-    NGRAPH_SUPPRESS_DEPRECATED_END
+    auto match_pass = std::make_shared<ov::pass::MatcherPass>(
+        m->get_name(),
+        m,
+        [m, internal_callback](const std::shared_ptr<Node>& node) -> bool {
+            NGRAPH_DEBUG << "Running matcher " << m->get_name() << " on " << node;
+            OV_PASS_CALLBACK(m);
+            if (std::dynamic_pointer_cast<ov::pass::pattern::Matcher>(m)->match(node->output(0))) {
+                NGRAPH_DEBUG << "Matcher " << m->get_name() << " matched " << node;
+                bool status = internal_callback(*m.get());
+                // explicitly clear Matcher state because it holds pointers to matched nodes
+                m->clear_state();
+                return status;
+            }
+            m->clear_state();
+            return false;
+        },
+        ov::pass::PassProperty::CHANGE_DYNAMIC_STATE);
+    pass.add_matcher(match_pass);
 }
 
 } // namespace low_precision
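Several hunks in this commit repeat the same migration: the deprecated GraphRewrite::add_matcher(matcher, callback, property) overload is replaced by constructing an ov::pass::MatcherPass explicitly and registering that. A condensed sketch of the pattern, using the MatcherPass constructor the commit itself relies on; the helper name is hypothetical:

#include <memory>

#include "openvino/pass/graph_rewrite.hpp"

// Hypothetical helper condensing the migration pattern used throughout this
// commit: wrap a pattern::Matcher plus a graph_rewrite_callback in a
// MatcherPass and register it on a GraphRewrite.
void add_matcher_compat(ov::pass::GraphRewrite& rewrite,
                        const std::shared_ptr<ov::pass::pattern::Matcher>& m,
                        const ov::graph_rewrite_callback& callback,
                        const ov::pass::PassPropertyMask& property) {
    auto match_pass = std::make_shared<ov::pass::MatcherPass>(
        m->get_name(),
        m,
        [m, callback](const std::shared_ptr<ov::Node>& node) -> bool {
            if (m->match(node->output(0))) {
                bool status = callback(*m);
                m->clear_state();  // Matcher caches matched nodes; drop them
                return status;
            }
            m->clear_state();
            return false;
        },
        property);
    rewrite.add_matcher(match_pass);
}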
@@ -5,7 +5,6 @@
 #include "low_precision/low_precision.hpp"
 
 #include <memory>
-
 
 #include <ngraph/ngraph.hpp>
 #include <ngraph/pass/manager.hpp>
 #include <ngraph/pass/constant_folding.hpp>
@@ -134,9 +133,24 @@ void make_matcher_type_relaxed(ngraph::pass::GraphRewrite* transformation) {
     };
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(p_node, matcher_name);
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    transformation->add_matcher(m, callback, ngraph::pass::PassProperty::CHANGE_DYNAMIC_STATE);
-    NGRAPH_SUPPRESS_DEPRECATED_END
+    auto match_pass = std::make_shared<ov::pass::MatcherPass>(
+        m->get_name(),
+        m,
+        [m, callback](const std::shared_ptr<Node>& node) -> bool {
+            NGRAPH_DEBUG << "Running matcher " << m->get_name() << " on " << node;
+            if (std::dynamic_pointer_cast<ov::pass::pattern::Matcher>(m)->match(node->output(0))) {
+                NGRAPH_DEBUG << "Matcher " << m->get_name() << " matched " << node;
+                OV_PASS_CALLBACK(m);
+                bool status = callback(*m.get());
+                // explicitly clear Matcher state because it holds pointers to matched nodes
+                m->clear_state();
+                return status;
+            }
+            m->clear_state();
+            return false;
+        },
+        ov::pass::PassProperty::CHANGE_DYNAMIC_STATE);
+    transformation->add_matcher(match_pass);
 }
 
 ngraph::pass::low_precision::TypeRelaxedReplacer::TypeRelaxedReplacer() {
@@ -80,7 +80,7 @@ public:
         0);
 
     ngraph::pass::low_precision::TypeRelaxedReplacer pass;
-    pass.run_on_function(actualFunction);
+    pass.run_on_model(actualFunction);
 
     auto supportedPrecisionsOnActivation = std::vector<ngraph::pass::low_precision::PrecisionsRestriction>(
         {ngraph::pass::low_precision::PrecisionsRestriction::create<ngraph::opset1::Convolution>(
@@ -129,7 +129,7 @@ public:
 };
 
 TEST_P(MarkupAvgPoolPrecisionsTransformation, CompareFunctions) {
-    ov::pass::InitNodeInfo().run_on_function(actualFunction);
+    ov::pass::InitNodeInfo().run_on_model(actualFunction);
     actualFunction->validate_nodes_and_infer_types();
 
     const auto avgPoolOperations = LayerTransformation::get<opset1::AvgPool>(actualFunction);
@@ -267,7 +267,7 @@ TEST_F(TransformationTestsF, PropagateMasksBasic) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksBasic.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -352,7 +352,7 @@ TEST_F(TransformationTestsF, PropagateMasksDynamicConvolution) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksDynamicConvolution.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     {
         pass::Manager m;
@@ -403,7 +403,7 @@ TEST(TransformationTests, PropagateMasksDynamicReshape) {
     auto function = std::make_shared<Function>(NodeVector{conv2}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksDynamicReshape.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -448,7 +448,7 @@ TEST(TransformationTests, PropagateMasksDynamicGroupConvolution) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksDynamicGroupConvolution.svg")
-            .run_on_function(f);
+            .run_on_model(f);
 
     pass::Manager m;
     m.register_pass<pass::InitMasks>();
@@ -486,7 +486,7 @@ TEST(TransformationTests, PropagateMasksEmpty) {
     auto f = std::make_shared<Function>(NodeVector{conv2}, ParameterVector{input});
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksEmpty.svg").run_on_function(f);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksEmpty.svg").run_on_model(f);
 
     pass::Manager m;
     m.register_pass<pass::InitMasks>();
@@ -583,7 +583,7 @@ TEST_F(TransformationTestsF, PropagateMaskPassThrough) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMaskPassThrough.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -735,7 +735,7 @@ TEST_F(TransformationTestsF, PropagateMasksHardDependencies) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksHardDependencies.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -886,7 +886,7 @@ TEST_F(TransformationTestsF, PropagateMasksQuantizedGroupConvolution) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksQuantizedGroupConvolution.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -1053,7 +1053,7 @@ TEST_F(TransformationTestsF, PropagateMasksQuantizedGroupConvolutionWithShapeOf)
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) +
                                     "PropagateMasksQuantizedGroupConvolutionWithShapeOf.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -1185,7 +1185,7 @@ TEST_F(TransformationTestsF, PropagateMasksFakeQuantizePerTensor) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksFakeQuantizePerTensor.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     {
         pass::Manager m;
@@ -1269,7 +1269,7 @@ TEST(TransformationTests, PropagateMasksFakeQuantizePerTensor1DScale) {
     auto function = std::make_shared<Function>(NodeVector{conv2}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksFakeQuantizePerTensor1DScale.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     {
         pass::Manager m;
@@ -1387,7 +1387,7 @@ TEST_F(TransformationTestsF, PropagateMasksFakeQuantizePerChannel) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksFakeQuantizePerChannel.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         // Masks for fq input parammeters didn't saved after
@@ -1530,7 +1530,7 @@ TEST_F(TransformationTestsF, TestConcatMaskPropagation) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "TestConcatMaskPropagation.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -1673,7 +1673,7 @@ TEST_F(TransformationTestsF, TestConcatMaskPropagationUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "TestConcatMaskPropagationUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -1744,7 +1744,7 @@ TEST(TransformationTests, TestConcatMaskPropagationUpEmpty) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "TestConcatMaskPropagationUpEmpty.svg")
-            .run_on_function(f);
+            .run_on_model(f);
 
     pass::Manager m;
     m.register_pass<pass::InitMasks>();
@@ -1806,7 +1806,7 @@ TEST_F(TransformationTestsF, PruneConvIsClosingAndInGroup) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneConvIsClosingAndInGroup.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         auto input = std::make_shared<opset10::Parameter>(element::f32, inputShapes);
         auto weights = create_constant_with_zeros(
@@ -1922,7 +1922,7 @@ TEST(TransformationTests, PruneBranchingStopOp) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneBranchingStopOp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -1977,7 +1977,7 @@ TEST(TransformationTests, PruneStopOpUp) {
     auto function = std::make_shared<ngraph::Function>(OutputVector{end_conv}, ParameterVector{input}, "StopOpUp");
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopOpUp.svg").run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopOpUp.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -2044,8 +2044,7 @@ TEST_F(TransformationTestsF, PruneReducelayerUp) {
         function_ref = std::make_shared<ngraph::Function>(OutputVector{conv_1}, ParameterVector{input});
     }
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneReducelayerUp.svg")
-            .run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneReducelayerUp.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::InitMasks>();
@@ -2142,7 +2141,7 @@ TEST_F(TransformationTestsF, PruneReduceLayerDown) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneReduceLayerDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -2194,7 +2193,7 @@ TEST(TransformationTests, PruneStopReducelayerUp) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopReducelayerUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -2252,7 +2251,7 @@ TEST(TransformationTests, PruneStopReduceLayerDown) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopReduceLayerDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -2327,7 +2326,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -2438,7 +2437,7 @@ TEST_P(TransformationTestsBoolParamF, MaskPropagationReshapeUpWithShapeOf) {
         const auto postfix = use_shape_of ? "True" : "False";
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUpWithShapeOf" + postfix +
                                     ".svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -2550,7 +2549,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUpShapeSubGraph) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUpShapeSubGraph.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -2642,7 +2641,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeExtend) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeExtend.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -2749,7 +2748,7 @@ TEST_F(DISABLED_TransformationTestsF, MaskPropagationReshapeDownMul) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeDownMul.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -2853,7 +2852,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeDownAdd) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeDownAdd.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -2902,7 +2901,7 @@ TEST(TransformationTests, MaskPropagationStopReshapeUp) {
     auto function = std::make_shared<ngraph::Function>(OutputVector{conv_1}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationStopReshapeUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -2959,7 +2958,7 @@ TEST(TransformationTests, MaskPropagationStopReshapeDown) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationStopReshapeDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3017,7 +3016,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUnsqueezeUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUnsqueezeUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3079,7 +3078,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUnsqueezeDown) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUnsqueezeDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3140,7 +3139,7 @@ TEST(TransformationTests, MaskPropagationWrongDimsElementwise) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationWrongDimsElementwise.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -3251,7 +3250,7 @@ TEST_F(TransformationTestsF, PruneSEBlock) {
         function_ref = std::make_shared<ngraph::Function>(OutputVector{end_conv}, ParameterVector{input}, "SEBlock");
     }
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneSEBlock.svg").run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneSEBlock.svg").run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3343,7 +3342,7 @@ TEST_F(TransformationTestsF, PropagateMasksLinear) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksLinear.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3395,7 +3394,7 @@ TEST(TransformationTests, MaskPropagationMatMulStopEmptyABranch) {
     auto function = std::make_shared<ngraph::Function>(OutputVector{mul_left}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationMatMulStopEmptyABranch.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3462,7 +3461,7 @@ TEST(TransformationTests, PruneLinearUp) {
    auto function = std::make_shared<ngraph::Function>(OutputVector{last_linear}, ParameterVector{input});
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneLinearUp.svg").run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneLinearUp.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -3519,8 +3518,7 @@ TEST(TransformationTests, PruneConvUpShort) {
     auto function = std::make_shared<ngraph::Function>(OutputVector{last_conv}, ParameterVector{input});
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneConvUpShort.svg")
-            .run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneConvUpShort.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::Pruning>();
@@ -3595,7 +3593,7 @@ TEST_F(TransformationTestsF, MaskPropagationLinearOuterDims) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationLinearOuterDims.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3674,7 +3672,7 @@ TEST(TransformationTests, MaskPropagationStopLinearOuterDims) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationStopLinearOuterDims.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3765,7 +3763,7 @@ TEST_F(TransformationTestsF, PruneMasksMatMulColsStopRowsUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneMasksMatMulColsStopRowsUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3854,7 +3852,7 @@ TEST_F(TransformationTestsF, PruneMasksMatMulRowsStopColsUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneMasksMatMulRowsStopColsUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -3949,8 +3947,7 @@ TEST_F(TransformationTestsF, PropagateFlattenUp) {
         function_ref = std::make_shared<Function>(NodeVector{linear}, ParameterVector{input});
     }
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateFlattenUp.svg")
-            .run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateFlattenUp.svg").run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -4025,7 +4022,7 @@ TEST_F(TransformationTestsF, PropagateFlattenDown) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateFlattenDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -4084,7 +4081,7 @@ TEST_F(TransformationTestsF, PropagateMasksTranspose) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksTranspose.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -4157,7 +4154,7 @@ TEST_F(TransformationTestsF, PropagateMasksTransposeComplex) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksTransposeComplex.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -4197,7 +4194,7 @@ TEST(TransformationTests, PropagateMasksTransposeStop) {
     auto function = std::make_shared<Function>(NodeVector{last_mul}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksTransposeStop.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -4323,7 +4320,7 @@ TEST_F(DISABLED_TransformationTestsF, PropagateMasksBroadcastedEltwiseWithInputs
     }
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksBroadcastedEltwiseWithInputs.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -4500,7 +4497,7 @@ TEST_F(TransformationTestsF, PropagateMasksBroadcastedEltwise) {
     }
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksBroadcastedEltwise.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -4663,7 +4660,7 @@ TEST_F(TransformationTestsF, MaskPropagationComplexReshape) {
     }
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationComplexReshape.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -4856,7 +4853,7 @@ TEST_P(TransformationTestsBoolParamF, MaskPropagationReshapedPassThroughP) {
         auto postfix = (add_shape_of) ? "True" : "False";
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapedPassThroughP" + postfix +
                                     ".svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -4981,7 +4978,7 @@ TEST_P(TransformationTestsBoolParamF, MaskPropagationBroadcastedSameRankEltwiseS
         auto postfix = (reverse_mul) ? "True" : "False";
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) +
                                     "MaskPropagationBroadcastedSameRankEltwiseSwappedLayoutP" + postfix + ".svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -5028,7 +5025,7 @@ TEST(TransformationTests, MaskPropagationBroadcastedEltwiseInputAndWeightsBroadc
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) +
                                     "MaskPropagationBroadcastedEltwiseInputAndWeightsBroadcasted.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -5078,7 +5075,7 @@ TEST(TransformationTests, MaskPropagationBroadcastedEltwiseWrongBroadcastingMode
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) +
                                     "MaskPropagationBroadcastedEltwiseWrongBroadcastingMode.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -5143,7 +5140,7 @@ TEST_F(TransformationTestsF, MaskPropagationMatMulWithSeveralOutputs) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationMatMulWithSeveralOutputs.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass<pass::InitMasks>();
@@ -5174,7 +5171,7 @@ TEST(TransformationTests, CheckReshapeWithNoConstInShape) {
 
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "CheckReshapeWithNoConstInShape.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass<pass::ShrinkWeights>();
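Every test hunk above is the same mechanical edit: ngraph::pass::VisualizeTree, like any other model pass after this commit, is invoked through run_on_model() rather than run_on_function(). A hedged usage sketch; the function name and output path are illustrative:

#include <memory>
#include <string>

#include "openvino/pass/visualize_tree.hpp"

// Dump a model to an .svg for debugging, using the post-migration entry point.
void dump_model(const std::shared_ptr<ov::Model>& model, const std::string& path) {
    ov::pass::VisualizeTree(path).run_on_model(model);
}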
@@ -257,14 +257,6 @@ public:
         return pass;
     }
 
-    OPENVINO_DEPRECATED("Use MatcherPass instead")
-    void add_matcher(const std::shared_ptr<pattern::Matcher>& m,
-                     const graph_rewrite_callback& callback,
-                     const PassPropertyMask& property);
-
-    OPENVINO_DEPRECATED("Use MatcherPass instead")
-    void add_matcher(const std::shared_ptr<pattern::Matcher>& m, const ov::graph_rewrite_callback& callback);
-
     bool run_on_model(const std::shared_ptr<ov::Model>& m) override;
 
     void set_pass_config(const std::shared_ptr<PassConfig>& pass_config) override;
@@ -76,36 +76,6 @@ public:
     /// \param new_state Value "true" enables Validate pass run; "false", otherwise
     void set_per_pass_validation(bool new_state);
 
-    /// \brief Callback is a lambda function that can be used by registered transformations.
-    /// The main purpose of this callback is to provide a way for plugins to disable/enable
-    /// transformations based on some conditions. In some cases plugins may want not to
-    /// execute some
-    /// transformations.
-    /// For example plugin can disable unpleasant decompositions because of performance
-    /// reasons for
-    /// some cases.
-    /// Callback example:
-    ///     auto callback = [](const std::shared_ptr<const ov::Node> & node) -> bool {
-    ///         return std::dynamic_pointer_cast<const ov::opset3::DepthToSpace>(node) !=
-    ///         nullptr;
-    ///     };
-    /// This callback returns true in case of DepthToSpace operation. So when execution
-    /// DepthToSpace
-    /// decomposition pass will check is this decomposition needed or plugin can execute
-    /// this
-    /// operation directly. And of course on transformation side we need to have a response
-    /// for this
-    /// callback.
-    ///     if (transformation_callback(batch_to_space)) {
-    ///         return false;
-    ///     }
-    /// \param callback lamda function that returns true in case if node is supported by
-    /// plugin and
-    /// transformation is not needed
-    OPENVINO_DEPRECATED("Please use get_pass_config() to configure transformation pipeline")
-    void set_callback(const param_callback& callback) {
-        m_pass_config->set_callback(callback);
-    }
     /// \return PassConfig shared object. This object is used for transformations pipeline
     /// configuration.
     /// This object allows to disable/enable transformations execution, set callback to
@@ -61,14 +61,6 @@ public:
     std::shared_ptr<PassConfig> get_pass_config() {
         return m_pass_config;
     }
-    /// \brief Applies callback for given node. By default callback returns false.
-    /// This method remains here only for backward compatibility and will be removed
-    /// after all transformations are moved to transformation_callback() method.
-    /// \return result of callback execution for given node
-    OPENVINO_DEPRECATED("Please use transformation_callback method instead")
-    bool m_transformation_callback(const std::shared_ptr<const Node>& node) {
-        return m_pass_config->get_callback(get_type_info())(node);
-    }
 
     /// \brief Applies callback for given node. By default callback returns false.
     /// \param node which will be used inside callback
@@ -99,13 +91,7 @@ class OPENVINO_API ModelPass : public PassBase {
 public:
     OPENVINO_RTTI("ov::pass::ModelPass");
     ~ModelPass() override;
-    OPENVINO_DEPRECATED("run_on_function() method is deprecated. Please use run_on_model() instead.")
-    virtual bool run_on_function(std::shared_ptr<ov::Model> m);
-    virtual bool run_on_model(const std::shared_ptr<ov::Model>& m);
-
-private:
-    bool call_on_function{false};
-    bool call_on_model{false};
+    virtual bool run_on_model(const std::shared_ptr<ov::Model>& m) = 0;
 };
 
 } // namespace pass
@@ -5,10 +5,11 @@
 #pragma once
 
 #include <cstdint>
-#include <openvino/core/validation_util.hpp>
-#include <openvino/op/batch_to_space.hpp>
-#include <openvino/opsets/opset2.hpp>
 
+#include "dimension_util.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/batch_to_space.hpp"
+#include "openvino/opsets/opset2.hpp"
 #include "utils.hpp"
 
 namespace ov {
@@ -19,6 +20,7 @@ template <class TShape>
 std::vector<TShape> shape_infer(const BatchToSpace* op,
                                 const std::vector<TShape>& input_shapes,
                                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+    using namespace ov::util;
     using ValType = typename TShape::value_type::value_type;
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
 
@@ -43,13 +45,15 @@ std::vector<TShape> shape_infer(const BatchToSpace* op,
                           "block_shape and crops inputs must have rank 1. Got: ",
                           inputs_same_ps.rank());
 
-    const ov::Rank data_rank = data_shape.rank();
+    const auto data_rank = data_shape.rank();
     if (data_rank.is_static()) {
         constexpr size_t spatial_dim_offset = 1;
+        const auto data_rank_size = data_shape.size();
+
         NODE_VALIDATION_CHECK(op,
-                              (data_shape.size() > spatial_dim_offset),
+                              (data_rank_size > spatial_dim_offset),
                               "data input must have rank greater or equal than 2. Got: ",
-                              data_shape.size());
+                              data_rank_size);
         if (inputs_same_ps.is_static()) {
             NODE_VALIDATION_CHECK(op,
                                   data_rank.get_length() == inputs_same_ps[0].get_length(),
@@ -60,38 +64,52 @@ std::vector<TShape> shape_infer(const BatchToSpace* op,
                                   data_rank);
         }
 
-        auto out_shape = data_shape;
-        std::vector<int64_t> block_val, crops_begin_val, crops_end_val;
+        TShape out_shape;
+        out_shape.reserve(data_rank_size);
 
-        if (get_data_as_int64<TShape>(1, op, block_val, constant_data) &&
-            get_data_as_int64<TShape>(2, op, crops_begin_val, constant_data) &&
-            get_data_as_int64<TShape>(3, op, crops_end_val, constant_data)) {
+        const auto blocks = get_input_const_data_as<TShape, int64_t>(op, 1, constant_data);
+        if (blocks) {
             NODE_VALIDATION_CHECK(op,
-                                  std::none_of(begin(block_val), end(block_val), cmp::Less<int64_t>(1)),
+                                  std::none_of(begin(*blocks), end(*blocks), cmp::Less<int64_t>(1)),
                                   "Elements of block_shape input must be greater or equal to one.");
+            const auto divisor = static_cast<ValType>(
+                std::accumulate(begin(*blocks), end(*blocks), int64_t(1), std::multiplies<int64_t>()));
+            out_shape.push_back(data_shape[0] / divisor);
+            check_divided_result(op, out_shape[0], data_shape[0], divisor);
+        } else {
+            out_shape.emplace_back(dim::inf_bound);
+        }
 
+        std::vector<int64_t> crops_begin_val, crops_end_val;
+        if (get_data_as_int64<TShape>(2, op, crops_begin_val, constant_data) &&
+            get_data_as_int64<TShape>(3, op, crops_end_val, constant_data)) {
+            constexpr auto is_invalid_crop = cmp::Less<int64_t>(0);
             NODE_VALIDATION_CHECK(op,
                                   std::none_of(begin(crops_begin_val), end(crops_begin_val), is_invalid_crop) &&
                                       std::none_of(begin(crops_end_val), end(crops_end_val), is_invalid_crop),
                                   "Elements of crops_begin and crops_end inputs must be greater or equal to zero.");
 
-            const auto divisor = static_cast<ValType>(
-                std::accumulate(begin(block_val), end(block_val), int64_t(1), std::multiplies<int64_t>()));
-
-            out_shape[0] /= divisor;
-            check_divided_result(op, out_shape[0], data_shape[0], divisor);
-
-            for (auto idx = spatial_dim_offset; idx < out_shape.size(); ++idx) {
-                out_shape[idx] *= static_cast<ValType>(block_val[idx]);
-                auto crop = static_cast<ValType>(crops_begin_val[idx] + crops_end_val[idx]);
-                NODE_VALIDATION_CHECK(
-                    op,
-                    out_shape[idx].is_dynamic() || crop <= out_shape[idx].get_length(),
-                    "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]");
-
-                out_shape[idx] = out_shape[idx] - crop;
+            if (blocks) {
+                for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) {
+                    auto d = data_shape[idx] * static_cast<ValType>((*blocks)[idx]);
+                    auto crop = static_cast<ValType>(crops_begin_val[idx] + crops_end_val[idx]);
+                    NODE_VALIDATION_CHECK(
+                        op,
+                        d.is_dynamic() || crop <= d.get_length(),
+                        "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]");
+
+                    out_shape.push_back(d - crop);
+                }
+            } else {
+                const auto block = Dimension(1, dim::inf_bound);
+                for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) {
+                    auto d = data_shape[idx] * block;
+                    auto crop = static_cast<ValType>(crops_begin_val[idx] + crops_end_val[idx]);
+                    out_shape.push_back(d - crop);
+                }
+            }
+        } else {
+            out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, Dimension::dynamic());
         }
         return {out_shape};
     } else {
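For reference, the inference above computes, when block and crops are constant: out[0] = data[0] / prod(block), and out[i] = data[i] * block[i] - (crops_begin[i] + crops_end[i]) for i >= 1. A standalone check of that arithmetic against the constants used in the type_prop test near the end of this commit:

#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Verify the BatchToSpace output-shape formula with the test's constants.
int main() {
    const std::vector<int64_t> data{4800, 9, 11, 2};
    const std::vector<int64_t> block{1, 12, 100, 2};
    const std::vector<int64_t> crops_begin{0, 3, 38, 1};
    const std::vector<int64_t> crops_end{0, 5, 38, 0};

    const int64_t block_prod =
        std::accumulate(block.begin(), block.end(), int64_t(1), std::multiplies<int64_t>());
    assert(data[0] % block_prod == 0);

    std::vector<int64_t> out{data[0] / block_prod};  // batch axis shrinks
    for (size_t i = 1; i < data.size(); ++i)         // spatial axes grow, then get cropped
        out.push_back(data[i] * block[i] - crops_begin[i] - crops_end[i]);

    assert((out == std::vector<int64_t>{2, 100, 1024, 3}));
    return 0;
}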
@@ -5,10 +5,11 @@
 #pragma once
 
 #include <cstdint>
-#include <openvino/core/validation_util.hpp>
-#include <openvino/op/space_to_batch.hpp>
-#include <openvino/opsets/opset2.hpp>
 
+#include "dimension_util.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/space_to_batch.hpp"
+#include "openvino/opsets/opset2.hpp"
 #include "utils.hpp"
 
 namespace ov {
@@ -19,6 +20,7 @@ template <class TShape>
 std::vector<TShape> shape_infer(const SpaceToBatch* op,
                                 const std::vector<TShape>& input_shapes,
                                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+    using namespace ov::util;
     using TVal = typename TShape::value_type::value_type;
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
 
@@ -45,30 +47,45 @@ std::vector<TShape> shape_infer(const SpaceToBatch* op,
 
     if (data_shape.rank().is_static()) {
         constexpr size_t spatial_dim_offset = 1;
+        const auto data_rank_size = data_shape.size();
         NODE_VALIDATION_CHECK(op,
-                              (data_shape.size() > spatial_dim_offset),
+                              (data_rank_size > spatial_dim_offset),
                               "The data tensor with rank lower than 2 is not supported (data rank: ",
-                              data_shape.size(),
+                              data_rank_size,
                               ")");
 
-        auto out_shape = data_shape;
-        std::vector<int64_t> block, pads_begin, pads_end;
-        if (get_data_as_int64<TShape>(1, op, block, constant_data) &&
-            get_data_as_int64<TShape>(2, op, pads_begin, constant_data) &&
-            get_data_as_int64<TShape>(3, op, pads_end, constant_data)) {
-            TVal block_prod = std::accumulate(begin(block), end(block), 1, std::multiplies<int64_t>());
+        TShape out_shape;
+        out_shape.reserve(data_rank_size);
 
-            out_shape[0] *= block_prod;
-            for (auto idx = spatial_dim_offset; idx < out_shape.size(); ++idx) {
-                NODE_VALIDATION_CHECK(op, block[idx] > 0, "block_shape values must be greater than 0");
-                if (out_shape[idx].is_static() || out_shape[idx] != Dimension::dynamic()) {
-                    const auto padded_dim = out_shape[idx] + static_cast<TVal>(pads_begin[idx] + pads_end[idx]);
-                    const auto divisor = static_cast<TVal>(block[idx]);
-                    out_shape[idx] = padded_dim / divisor;
-                    check_divided_result(op, out_shape[idx], padded_dim, divisor);
-                }
-            }
+        auto blocks = get_input_const_data_as<TShape, int64_t>(op, 1, constant_data);
+        if (blocks) {
+            TVal block_prod = std::accumulate(begin(*blocks), end(*blocks), 1, std::multiplies<int64_t>());
+            out_shape.push_back(data_shape[0] * block_prod);
+        } else {
+            out_shape.emplace_back(dim::inf_bound);
+        }
+
+        std::vector<int64_t> pads_begin, pads_end;
+        if (blocks && get_data_as_int64<TShape>(2, op, pads_begin, constant_data) &&
+            get_data_as_int64<TShape>(3, op, pads_end, constant_data)) {
+            for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) {
+                NODE_VALIDATION_CHECK(op, (*blocks)[idx] > 0, "block_shape values must be greater than 0");
+
+                const auto padded_dim = data_shape[idx] + static_cast<TVal>(pads_begin[idx] + pads_end[idx]);
+                const auto divisor = static_cast<TVal>((*blocks)[idx]);
+
+                if (padded_dim.get_max_length() == dim::inf_bound) {
+                    out_shape.emplace_back(ceil_div(padded_dim.get_min_length(), divisor), dim::inf_bound);
+                } else {
+                    out_shape.push_back(padded_dim / divisor);
+                }
+
+                check_divided_result(op, out_shape[idx], padded_dim, divisor);
+            }
+        } else {
+            out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, dim::inf_bound);
         }
 
         return {out_shape};
     } else {
         return {PartialShape::dynamic()};
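SpaceToBatch is the inverse mapping: out[0] = data[0] * prod(block), and out[i] = (data[i] + pads_begin[i] + pads_end[i]) / block[i] for i >= 1. A quick numeric check, assuming the pads restore exactly what BatchToSpace cropped, so the round trip recovers the original shape:

#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Apply the SpaceToBatch formula to the {2, 100, 1024, 3} result above and
// recover the original {4800, 9, 11, 2} shape.
int main() {
    const std::vector<int64_t> data{2, 100, 1024, 3};
    const std::vector<int64_t> block{1, 12, 100, 2};
    const std::vector<int64_t> pads_begin{0, 3, 38, 1};
    const std::vector<int64_t> pads_end{0, 5, 38, 0};

    const int64_t block_prod =
        std::accumulate(block.begin(), block.end(), int64_t(1), std::multiplies<int64_t>());

    std::vector<int64_t> out{data[0] * block_prod};  // batch axis grows
    for (size_t i = 1; i < data.size(); ++i) {       // spatial axes are padded, then divided
        const int64_t padded = data[i] + pads_begin[i] + pads_end[i];
        assert(padded % block[i] == 0);
        out.push_back(padded / block[i]);
    }

    assert((out == std::vector<int64_t>{4800, 9, 11, 2}));
    return 0;
}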
@@ -33,10 +33,6 @@ OV_CC_DOMAINS(ov_opset);
 */
 #if defined(SELECTIVE_BUILD_ANALYZER)
 # define OV_OP_SCOPE(region) OV_SCOPE(ov_op, region)
-# define OV_PASS_CALLBACK(matcher)                                    \
-     openvino::itt::handle_t m_callback_handle;                       \
-     m_callback_handle = openvino::itt::handle(matcher->get_name());  \
-     OV_ITT_SCOPED_TASK(SIMPLE_ov_pass, m_callback_handle)
 # define REGISTER_OP(opset_name, op_name) \
      OV_ITT_SCOPED_TASK(SIMPLE_ov_opset, openvino::itt::handle(opset_name + "_" + op_name))
 # define INSERT_OP(opset_name, op_name, op_namespace) opset.insert<op_namespace::op_name>()
@@ -44,14 +40,12 @@ OV_CC_DOMAINS(ov_opset);
 # define OV_OP_SCOPE(region) \
      if (OV_CC_SCOPE_IS_ENABLED(OV_PP_CAT3(ov_op, _, region)) == 0) \
          throw ngraph::ngraph_error(std::string(OV_PP_TOSTRING(OV_PP_CAT3(ov_op, _, region))) + " is disabled!")
-# define OV_PASS_CALLBACK(matcher)
 # define REGISTER_OP(opset_name, op_name)
 # define INSERT_OP(opset_name, op_name, op_namespace) \
      if (OV_CC_SCOPE_IS_ENABLED(OV_PP_CAT4(ov_opset_, opset_name, _, op_name)) == 1) \
          opset.insert<op_namespace::op_name>()
 #else
 # define OV_OP_SCOPE(region) OV_ITT_SCOPED_TASK(ov::itt::domains::ov_op, OV_PP_TOSTRING(region))
-# define OV_PASS_CALLBACK(matcher)
 # define REGISTER_OP(opset_name, op_name)
 # define INSERT_OP(opset_name, op_name, op_namespace) opset.insert<op_namespace::op_name>()
 #endif
@@ -78,55 +78,16 @@ bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& vi
 namespace {
 bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) {
     auto data = inputs[0];
-    size_t elem_size = data->get_element_type().size();
+    const auto elem_size = data->get_element_type().size();
 
-    if (data->get_partial_shape().is_dynamic()) {
-        return false;
-    }
     auto data_shape = data->get_shape();
-    auto data_rank = data_shape.size();
-    if (data_rank < 2) {
-        return false;
-    }
 
-    size_t block_values_size = shape_size(inputs[1]->get_shape());
-    size_t crops_begin_size = shape_size(inputs[2]->get_shape());
-    size_t crops_end_size = shape_size(inputs[3]->get_shape());
-    NGRAPH_CHECK(block_values_size == data_rank && crops_begin_size == data_rank && crops_end_size == data_rank,
-                 "Invalid block_shape/crops_begin/crops_end shape with respect to rank of data input");
+    auto const block_values_size = shape_size(inputs[1]->get_shape());
 
     const auto* block_values = inputs[1]->get_data_ptr<int64_t>();
     const auto* crops_begin_values = inputs[2]->get_data_ptr<int64_t>();
     const auto* crops_end_values = inputs[3]->get_data_ptr<int64_t>();
 
-    const bool block_vals_valid = std::all_of(block_values, block_values + block_values_size, [](int64_t elem) {
-        return elem >= 1;
-    });
-    NGRAPH_CHECK(block_vals_valid, "Invalid element values of block_shape input");
-
-    const bool crops_begin_vals_valid =
-        std::all_of(crops_begin_values, crops_begin_values + crops_begin_size, [](int64_t elem) {
-            return elem >= 0;
-        });
-    const bool crops_end_vals_valid =
-        std::all_of(crops_end_values, crops_end_values + crops_end_size, [](int64_t elem) {
-            return elem >= 0;
-        });
-    NGRAPH_CHECK(crops_begin_vals_valid && crops_end_vals_valid,
-                 "Invalid element values of crops_begin/crops_end input/s");
-
-    const std::size_t block_prod =
-        std::accumulate(block_values, block_values + block_values_size, int64_t(1), std::multiplies<int64_t>());
-    NGRAPH_CHECK(data_shape[0] % block_prod == 0,
-                 "Invalid batch axis of data input with respect to block_shape values");
-
-    for (size_t i = 0; i < data_rank; i++) {
-        const bool is_valid_crops_and_shape =
-            crops_begin_values[i] + crops_end_values[i] <= block_values[i] * static_cast<int64_t>(data_shape[i]);
-        NGRAPH_CHECK(is_valid_crops_and_shape,
-                     "Invalid crops values (out of bounds) with respect to the shape of data input");
-    }
-
     ov::Shape dispersed_shape(1);
     dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end());
     std::vector<size_t> axes_order(block_values_size + 1);
@@ -214,6 +175,26 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, con
     OV_OP_SCOPE(v1_BatchToSpace_evaluate);
     NGRAPH_CHECK(validate_host_tensor_vector(inputs, 4));
     NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
 
+    if (outputs[0]->get_partial_shape().is_dynamic()) {
+        std::map<size_t, HostTensorPtr> constant_data;
+        std::vector<ov::PartialShape> input_shapes;
+        input_shapes.reserve(inputs.size());
+
+        for (size_t i = 0; i < inputs.size(); ++i) {
+            input_shapes.push_back(inputs[i]->get_partial_shape());
+            if (input_shapes.back().is_dynamic()) {
+                return false;
+            }
+            constant_data.emplace(i, inputs[i]);
+        }
+
+        const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape();
+
+        outputs[0]->set_element_type(inputs[0]->get_element_type());
+        outputs[0]->set_shape(output_shape);
+    }
+
     return batch_to_space_evaluate(outputs, inputs);
 }
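Both evaluate() bodies in this commit gain the same preamble: when the output shape is still dynamic at evaluation time, the already-materialized input tensors are handed to shape_infer() as constant data, and the resolved static shape is written back before the reference computation runs. A hedged sketch of that preamble factored into a helper; the function name is hypothetical and not part of the commit:

#include <map>
#include <vector>

// Hypothetical extraction of the repeated preamble: resolve a dynamic output
// shape by treating the input tensors as constants for shape inference.
template <class TOp>
bool resolve_output_shape(TOp* op, const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) {
    std::map<size_t, ngraph::HostTensorPtr> constant_data;
    std::vector<ov::PartialShape> input_shapes;
    input_shapes.reserve(inputs.size());

    for (size_t i = 0; i < inputs.size(); ++i) {
        input_shapes.push_back(inputs[i]->get_partial_shape());
        if (input_shapes.back().is_dynamic())
            return false;  // cannot infer without static input shapes
        constant_data.emplace(i, inputs[i]);
    }

    const auto output_shape = shape_infer(op, input_shapes, constant_data).front().to_shape();
    outputs[0]->set_element_type(inputs[0]->get_element_type());
    outputs[0]->set_shape(output_shape);
    return true;
}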
@@ -75,13 +75,29 @@ bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& vi
 
 bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVector& outputs,
                                                            const HostTensorVector& inputs) const {
+    if (outputs[0]->get_partial_shape().is_dynamic()) {
+        std::map<size_t, HostTensorPtr> constant_data;
+        std::vector<ov::PartialShape> input_shapes;
+        input_shapes.reserve(inputs.size());
+
+        for (size_t i = 0; i < inputs.size(); ++i) {
+            input_shapes.push_back(inputs[i]->get_partial_shape());
+            if (input_shapes.back().is_dynamic()) {
+                return false;
+            }
+            constant_data.emplace(i, inputs[i]);
+        }
+
+        const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape();
+
+        outputs[0]->set_element_type(inputs[0]->get_element_type());
+        outputs[0]->set_shape(output_shape);
+    }
+
     const auto& data = inputs[0];
     const auto& out = outputs[0];
     size_t elem_size = data->get_element_type().size();
 
     if (data->get_partial_shape().is_dynamic()) {
         return false;
     }
     auto data_shape = data->get_shape();
 
     if (!(data->get_shape().size() == 4 || data->get_shape().size() == 5)) {
@@ -188,6 +204,7 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto
 
 bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v1_SpaceToBatch_evaluate);
+
     return evaluate_space_to_batch(outputs, inputs);
 }
@@ -239,37 +239,6 @@ bool ov::pass::GraphRewrite::apply_matcher_passes(std::shared_ptr<Model> f,
     return rewritten;
 }
 
-void ov::pass::GraphRewrite::add_matcher(const std::shared_ptr<pattern::Matcher>& m,
-                                         const graph_rewrite_callback& callback,
-                                         const PassPropertyMask& property) {
-    m_matchers.push_back(std::make_shared<MatcherPass>(
-        m->get_name(),
-        m,
-        [m, callback](const std::shared_ptr<Node>& node) -> bool {
-            NGRAPH_DEBUG << "Running matcher " << m->get_name() << " on " << node;
-            if (m->match(node->output(0))) {
-                NGRAPH_DEBUG << "Matcher " << m->get_name() << " matched " << node;
-                OV_PASS_CALLBACK(m);
-                bool status = callback(*m.get());
-                // explicitly clear Matcher state because it holds pointers to matched nodes
-                m->clear_state();
-                return status;
-            }
-            m->clear_state();
-            return false;
-        },
-        property));
-}
-
-void ov::pass::GraphRewrite::add_matcher(const std::shared_ptr<pattern::Matcher>& m,
-                                         const graph_rewrite_callback& callback) {
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    // TODO: before deprecate this function, by default expect the
-    // callback require static shape.
-    add_matcher(m, callback, {PassProperty::REQUIRE_STATIC_SHAPE});
-    NGRAPH_SUPPRESS_DEPRECATED_END
-}
-
 void ov::pass::GraphRewrite::set_pass_config(const std::shared_ptr<PassConfig>& rhs) {
     auto pass_config = get_pass_config();
     // We have to preserve disabled passes because in case when we register matchers inside
@@ -74,23 +74,6 @@ ov::pass::ModelPass::~ModelPass() = default;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-
-bool ov::pass::ModelPass::run_on_model(const std::shared_ptr<ov::Model>& m) {
-    RUN_ON_MODEL_SCOPE(ModelPass);
-    RunLocker locked(call_on_model);
-    OPENVINO_ASSERT(!call_on_function,
-                    "Cycle detected. run_on_model() or run_on_function() method should be overridden.");
-    bool sts = run_on_function(m);
-    return sts;
-}
-
-bool ov::pass::ModelPass::run_on_function(std::shared_ptr<ov::Model> m) {
-    RUN_ON_FUNCTION_SCOPE(ModelPass);
-    RunLocker locked(call_on_function);
-    OPENVINO_ASSERT(!call_on_model, "Cycle detected. run_on_model() or run_on_function() method should be overridden.");
-    bool sts = run_on_model(m);
-    return sts;
-}
-
 NGRAPH_RTTI_DEFINITION(ngraph::pass::NodePass, "ngraph::pass::NodePass", 0);
 
 ngraph::pass::NodePass::~NodePass() = default;
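The code deleted above was the transition shim: each entry point forwarded to the other, and a pair of flags guarded by an RAII locker caught the case where a subclass overrode neither method, which would otherwise recurse forever. With run_on_model() now pure virtual, the trampoline is unnecessary. A hedged, self-contained sketch of the detection idea; the struct name is illustrative:

#include <cassert>

// Illustrative reconstruction of the mutual-recursion guard the deleted shim
// implemented: set a flag for the duration of a call and fail fast if the
// sibling entry point is already on the stack.
struct RunLockerSketch {
    explicit RunLockerSketch(bool& flag) : m_flag(flag) {
        assert(!m_flag);  // re-entry means neither method was overridden
        m_flag = true;
    }
    ~RunLockerSketch() {
        m_flag = false;
    }
    bool& m_flag;
};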
@@ -107,7 +107,7 @@ TEST(GraphRewriteTest, MatcherPassCallback) {
 
     Anchor anchor;
     anchor.add_matcher<TestPass>()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
 }
@@ -118,7 +118,7 @@ TEST(GraphRewriteTest, GraphRewriteCallback) {
     Anchor anchor;
     anchor.add_matcher<TestPass>();
     anchor.set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
 }
@@ -129,7 +129,7 @@ TEST(GraphRewriteTest, ManagerCallbackDeprecated) {
     pass::Manager manager;
     auto anchor = manager.register_pass<Anchor>();
     anchor->add_matcher<TestPass>();
-    manager.set_callback(get_callback());
+    manager.get_pass_config()->set_callback(get_callback());
     manager.run_passes(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
@@ -153,7 +153,7 @@ TEST(GraphRewriteTest, ManagerCallback2) {
 
     pass::Manager manager;
     auto anchor = manager.register_pass<TestPass>();
-    manager.set_callback(get_callback());
+    manager.get_pass_config()->set_callback(get_callback());
     manager.run_passes(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
@@ -179,7 +179,7 @@ TEST(GraphRewriteTest, MatcherPassCallbackDerived) {
 
     Anchor anchor;
     anchor.add_matcher<TestPass>()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
 }
@@ -228,7 +228,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassCallback) {
 
     Anchor anchor;
     anchor.add_matcher<TypeBasedTestPass>()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
 }
@@ -238,7 +238,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassCallbackDerived) {
 
     Anchor anchor;
     anchor.add_matcher<TypeBasedTestPass>()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
 }
@@ -249,7 +249,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassOrder1) {
     Anchor anchor;
     anchor.add_matcher<TypeBasedTestPass>()->set_callback(get_callback());
     anchor.add_matcher<TypeBasedTestPassDerived>()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Relu>(f), 1);
 }
@@ -260,7 +260,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassOrder2) {
     Anchor anchor;
     anchor.add_matcher<TypeBasedTestPassDerived>()->set_callback(get_callback());
     anchor.add_matcher<TypeBasedTestPass>()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type<opset3::Tanh>(f), 1);
 }
@@ -98,7 +98,7 @@ TEST(pattern, matcher_pass) {
 
     pass::GraphRewrite pass;
     pass.add_matcher<TestMatcherPass>();
-    pass.run_on_function(f);
+    pass.run_on_model(f);
 
     // Parameter->Relu->Result
     ASSERT_TRUE(f->get_ops().size() == 3);
@@ -56,7 +56,7 @@ class TestFunctionPass : public ngraph::pass::FunctionPass {
 public:
     NGRAPH_RTTI_DECLARATION;
 
-    bool run_on_function(std::shared_ptr<Function> f) override {
+    bool run_on_model(const std::shared_ptr<Function>& f) override {
         pass::Manager manager(get_pass_config());
 
         manager.register_pass<RenameReLU, false /*disabled by default*/>();
@@ -37,7 +37,7 @@ namespace {
 class DummyPass : public pass::FunctionPass {
 public:
     DummyPass() {}
-    bool run_on_function(std::shared_ptr<ngraph::Function> /* f */) override {
+    bool run_on_model(const std::shared_ptr<ngraph::Function>& /* f */) override {
         return false;
     }
 };
@ -111,9 +111,23 @@ public:
|
||||
};
|
||||
|
||||
auto m = make_shared<TestMatcher>(make_shared<op::v1::Multiply>(pattern, iconst1));
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
this->add_matcher(m, callback);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
auto match_pass = std::make_shared<ov::pass::MatcherPass>(
|
||||
m->get_name(),
|
||||
m,
|
||||
[m, callback](const std::shared_ptr<Node>& node) -> bool {
|
||||
NGRAPH_DEBUG << "Running matcher " << m->get_name() << " on " << node;
|
||||
if (std::dynamic_pointer_cast<ov::pass::pattern::Matcher>(m)->match(node->output(0))) {
|
||||
NGRAPH_DEBUG << "Matcher " << m->get_name() << " matched " << node;
|
||||
bool status = callback(*m.get());
|
||||
// explicitly clear Matcher state because it holds pointers to matched nodes
|
||||
m->clear_state();
|
||||
return status;
|
||||
}
|
||||
m->clear_state();
|
||||
return false;
|
||||
},
|
||||
ov::pass::PassProperty::REQUIRE_STATIC_SHAPE);
|
||||
this->add_matcher(match_pass);
|
||||
}
|
||||
|
||||
void construct_add_zero() {

@ -156,9 +170,23 @@ public:

auto add = make_shared<op::v1::Add>(pattern, iconst0);
auto m = make_shared<TestMatcher>(add);
NGRAPH_SUPPRESS_DEPRECATED_START
this->add_matcher(m, callback);
NGRAPH_SUPPRESS_DEPRECATED_END
auto match_pass = std::make_shared<ov::pass::MatcherPass>(
m->get_name(),
m,
[m, callback](const std::shared_ptr<Node>& node) -> bool {
NGRAPH_DEBUG << "Running matcher " << m->get_name() << " on " << node;
if (std::dynamic_pointer_cast<ov::pass::pattern::Matcher>(m)->match(node->output(0))) {
NGRAPH_DEBUG << "Matcher " << m->get_name() << " matched " << node;
bool status = callback(*m.get());
// explicitly clear Matcher state because it holds pointers to matched nodes
m->clear_state();
return status;
}
m->clear_state();
return false;
},
ov::pass::PassProperty::REQUIRE_STATIC_SHAPE);
this->add_matcher(match_pass);
}

TestGraphRewrite() : GraphRewrite() {
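The deprecated GraphRewrite::add_matcher(matcher, callback, property) overload used in these two hunks is replaced by constructing an explicit ov::pass::MatcherPass whose lambda reproduces the old behavior, including clearing the matcher state after every attempt so matched nodes are not kept alive. In new code the same effect is usually achieved by deriving from MatcherPass directly; a minimal sketch under that assumption (the pass name, pattern, and callback are placeholders):

#include <openvino/op/multiply.hpp>
#include <openvino/pass/graph_rewrite.hpp>
#include <openvino/pass/pattern/op/wrap_type.hpp>

class EliminateMultiplyByOne : public ov::pass::MatcherPass {  // hypothetical
public:
    EliminateMultiplyByOne() {
        auto pattern = ov::pass::pattern::wrap_type<ov::op::v1::Multiply>();
        ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) {
            // rewrite logic would go here; return true only if the graph changed
            return false;
        };
        auto m = std::make_shared<ov::pass::pattern::Matcher>(pattern, "EliminateMultiplyByOne");
        // register_matcher clears the matcher state itself after each attempt
        register_matcher(m, callback);
    }
};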

@ -388,19 +388,19 @@ TEST(type_prop, batch_to_space_input_interval_shape_block_one) {
}

TEST(type_prop, batch_to_space_and_space_to_batch) {
auto data = make_shared<op::Parameter>(element::f32, Shape{4800, 9, 11, 2});
auto data = make_shared<op::Parameter>(element::f32, PartialShape{4800, 9, {11, -1}, 2});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);

ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
ASSERT_EQ(batch_to_space->get_shape(),
(Shape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, 11 * 100 - 38 - 38, 2 * 2 - 1}));
ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
(PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1}));

auto space_to_batch = make_shared<op::v1::SpaceToBatch>(batch_to_space, block_shape, crops_begin, crops_end);
ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_shape(), (Shape{4800, 9, 11, 2}));
ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (PartialShape{4800, 9, {11, -1}, 2}));
}
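For the static parts of this interval shape, the BatchToSpace output follows directly from the definition: the batch dimension is divided by the block product, 4800 / (12 * 100 * 2) = 2, and each spatial dimension is multiplied by its block value and reduced by the crops: 9 * 12 - 3 - 5 = 100 and 2 * 2 - 1 = 3. The interval dimension {11, -1} propagates the same way: its lower bound becomes 11 * 100 - 38 - 38 = 1024 while its upper bound stays unbounded.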
TEST(type_prop, batch_to_space_dynamic_shape_static_rank) {

@ -441,3 +441,37 @@ TEST(type_prop, batch_to_space_default_ctor) {
EXPECT_EQ(batch_to_space->get_element_type(), element::i16);
EXPECT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
}

TEST(type_prop, batch_to_space_non_const_inputs) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});

auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto crops_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto crops_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);

EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4));
}

TEST(type_prop, batch_to_space_block_non_constant_only) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);

EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {12, -1}, {3, -1}}));
}

TEST(type_prop, batch_to_space_crops_non_constant_only) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 5, 1});
auto crops_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto crops_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);

EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({10, -1, -1, -1}));
}
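The brace pairs in these expectations denote interval dimensions of a PartialShape; a small illustration of how they read (assumed behavior of ov::Dimension, matching its documented accessors):

ov::PartialShape shape{4800, 9, {11, -1}, 2};
// shape[2].get_min_length() == 11
// shape[2].get_max_length() == -1, i.e. the upper bound is unbounded
// a plain -1 dimension is fully dynamic: [0, inf)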

@ -49,7 +49,7 @@ TEST(type_prop, space_to_batch_output_shape_5D) {
}

TEST(type_prop, space_to_batch_and_batch_to_space) {
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 100, 1024, 3});
auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, {100, -1}, 1024, 3});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});

@ -57,12 +57,12 @@ TEST(type_prop, space_to_batch_and_batch_to_space) {
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);

ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_shape(),
(Shape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2}));
ASSERT_EQ(space_to_batch->get_output_partial_shape(0),
(PartialShape{2 * 12 * 100 * 2, {(100 + 3 + 5) / 12, -1}, (1024 + 38 + 38) / 100, (3 + 1) / 2}));

auto batch_to_space = make_shared<op::v1::BatchToSpace>(space_to_batch, block_shape, pads_begin, pads_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
ASSERT_EQ(batch_to_space->get_shape(), (Shape{2, 100, 1024, 3}));
ASSERT_EQ(batch_to_space->get_output_partial_shape(0), (PartialShape{2, {100, -1}, 1024, 3}));
}
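SpaceToBatch inverts the arithmetic above: the batch dimension is multiplied by the block product, 2 * 12 * 100 * 2 = 4800, and each spatial dimension is padded and divided by its block value: (100 + 3 + 5) / 12 = 9, (1024 + 38 + 38) / 100 = 11, (3 + 1) / 2 = 2. With the second dimension relaxed to the interval {100, -1}, only its lower bound (100 + 3 + 5) / 12 = 9 is known, which is exactly what the updated assertion encodes.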
TEST(type_prop, space_to_batch_when_space_is_static) {

@ -117,13 +117,13 @@ TEST(type_prop, space_to_batch_when_space_is_dynamic) {
TEST(type_prop, space_to_batch_dynamic_shape_static_rank) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic(4));
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 2, 0});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0});

auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);

ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4));
ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {1, -1}, -1}));
}

TEST(type_prop, space_to_batch_dynamic_shape_dynamic_rank) {

@ -151,7 +151,7 @@ TEST(type_prop, space_to_batch_dynamic_rank_shape_block_and_pads_not_const) {
}

TEST(type_prop, space_to_batch_default_ctor) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{{2, 5}, 100, {100, 1024}, 3});
auto data = make_shared<op::Parameter>(element::f32, PartialShape{{2, 5}, 100, {100, -1}, 3});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 4, 1});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 1, 2, 0});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 1, 6, 0});

@ -164,7 +164,42 @@ TEST(type_prop, space_to_batch_default_ctor) {
EXPECT_EQ(space_to_batch->get_output_size(), 1);
EXPECT_EQ(space_to_batch->get_output_element_type(0), element::f32);
EXPECT_EQ(space_to_batch->get_output_partial_shape(0),
PartialShape({{2 * 2 * 4, 5 * 2 * 4}, (100 + 2) / 2, {(100 + 2 + 6) / 4, (1024 + 2 + 6) / 4}, 3}));
PartialShape({{2 * 2 * 4, 5 * 2 * 4}, (100 + 2) / 2, {(100 + 2 + 6) / 4, -1}, 3}));
}

TEST(type_prop, space_to_batch_non_const_inputs) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});

auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto pads_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto pads_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);

EXPECT_EQ(space_to_batch->get_element_type(), element::f32);
EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4));
}

TEST(type_prop, space_to_batch_block_non_constant_only) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);

EXPECT_EQ(space_to_batch->get_element_type(), element::f32);
EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4));
}

TEST(type_prop, space_to_batch_crops_non_constant_only) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});

auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 5, 1});
auto pads_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto pads_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);

EXPECT_EQ(space_to_batch->get_element_type(), element::f32);
EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape({1000, -1, -1, -1}));
}

TEST(type_prop, space_to_batch_invalid_element_type_block_shape) {

@ -39,9 +39,9 @@ public:
std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> CalculateRefs() override {
// Convert the second input constant precision to i64 to run the reference function
if (ngraph::element::Type_t::i8 == secondConstantType) {
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::i64>().run_on_function(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::i64>().run_on_model(functionRefs);
} else if (ngraph::element::Type_t::bf16 == secondConstantType) {
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::i64>().run_on_function(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::i64>().run_on_model(functionRefs);
}
return LayerTestsUtils::LayerTestsCommon::CalculateRefs();
}

@ -203,8 +203,8 @@ public:
using ngraph::pass::ConvertPrecision;
ConcatConvSumInPlaceTest::SetUp();
functionRefs = function->clone();
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::f32>().run_on_function(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::u8, ngraph::element::Type_t::f32>().run_on_function(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::u8, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
functionRefs->validate_nodes_and_infer_types();
}
};

@ -229,12 +229,6 @@ std::string deconvolution_inst::to_string(deconvolution_node const& node) {
auto node_info = node.desc_to_json();

std::stringstream primitive_description;
std::stringstream ss_weights, ss_biases;

ss_weights << node.weights().id();
ss_weights << ", count: " << node.weights().get_output_layout().count();
ss_biases << node.bias().id();
ss_biases << ", count: " << node.bias().get_output_layout().count();

json_composite deconv_info;
deconv_info.add("stride", cldnn::to_string(strd));

@ -245,6 +239,17 @@ std::string deconvolution_inst::to_string(deconvolution_node const& node) {
ud_out_size_info.add("size", desc->output_size.to_string());
deconv_info.add("with_user_defined_output_size", ud_out_size_info);
}
std::stringstream ss_weights;
ss_weights << node.weights().id();
ss_weights << ", count: " << node.weights().get_output_layout().count();
deconv_info.add("weights", ss_weights.str());
if (node.bias_term()) {
std::stringstream ss_biases;
ss_biases << node.bias().id();
ss_biases << ", count: " << node.bias().get_output_layout().count();
deconv_info.add("bias", ss_biases.str());
}

node_info->add("deconvolution info", deconv_info);
node_info->dump(primitive_description);
return primitive_description.str();
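Beyond moving the string building closer to its use, this hunk appears to fix a latent defect: the old code streamed node.bias() unconditionally while composing the report, which is not valid for deconvolutions created without a bias. The weights/bias entries are now emitted into deconv_info, with the bias part guarded by node.bias_term().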

@ -105,6 +105,10 @@ add_fusing_type onednn_add_fusing_helpers::get_add_fusing_type(
auto p_layout = p_node.get_output_layout();
auto d_layout = dep_node.get_output_layout();

if (p_node.is_dynamic() || dep_node.is_dynamic()) {
return add_fusing_type::not_supported;
}

if (is_full_tensor(p_layout) && is_full_tensor(d_layout)) {
if (data_type_traits::size_of(p_layout.data_type) == data_type_traits::size_of(d_layout.data_type)
&& p_layout.format == d_layout.format && p_layout.get_tensor() == d_layout.get_tensor()

@ -1140,7 +1140,10 @@ JitConstants MakeActivationJitConstants(ActivationFunction activation_function,
jitConstants.AddConstant(MakeJitConstant(macro_def, "(input)"));
break;
case ActivationFunction::CEIL:
jitConstants.AddConstant(MakeJitConstant(macro_def, "(ceil(input))"));
if (out_dt == Datatype::F32 || out_dt == Datatype::F16)
jitConstants.AddConstant(MakeJitConstant(macro_def, "(ceil(input))"));
else
jitConstants.AddConstant(MakeJitConstant(macro_def, "(input)"));
break;
case ActivationFunction::NEGATIVE:
jitConstants.AddConstant(MakeJitConstant(macro_def, "(-input)"));
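The CEIL change accounts for the OpenCL built-in ceil() being defined only for floating-point types: when the activation's output type is integral the value is already whole, so the generated macro degenerates to the identity. Under the two branches the emitted jit constant would expand to something like the following (the macro name is illustrative; macro_def carries the real one):

#define ACTIVATION_FUNC(input) (ceil(input))   /* out_dt == F32 or F16 */
#define ACTIVATION_FUNC(input) (input)         /* integral out_dt */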

@ -117,24 +117,26 @@ bool ScatterNDUpdateKernelRef::Validate(const Params& p, const optional_params&
return true;
}

static std::string GetInputBlockND(const scatter_nd_update_params& params, size_t num, size_t rank) {
static std::string GetInputBlockND(const scatter_nd_update_params& params, size_t num, size_t dyn_offset, size_t rank) {
const auto& input = params.inputs[num];

auto input_dims = input.LogicalDims();
std::reverse(input_dims.begin(), input_dims.end());
auto dims = input.GetDims();
std::reverse(dims.begin(), dims.end());

std::vector<size_t> block_nd(rank + 1);
block_nd[rank] = 1;

std::vector<std::string> block_nd_s(rank + 1);
block_nd_s[rank] = "1";
size_t input_offset = num * 6;
size_t input_offset = dyn_offset * 6;

for (int32_t idx = rank - 1; idx >= 0; --idx) {
block_nd[idx] = input_dims[idx] * block_nd[idx + 1];

size_t dim_offset = idx < 2 ? idx : idx + 6 - rank;
block_nd_s[idx] = "(" + toCodeString(input.GetDims()[input.GetDims().size() - idx - 1], input_offset + dim_offset) + "*" + block_nd_s[idx + 1] + ")";
size_t dim_offset = idx < 2 ? idx : (6 - dims.size()) + idx; // convert to 6d bfwzyx idx
block_nd_s[idx] = "(" + toCodeString(dims[idx], input_offset + dim_offset) + "*" + block_nd_s[idx + 1] + ")";
}

std::string result;
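GetInputBlockND builds, for each dimension, the number of elements spanned by one step in that dimension, i.e. a row-major stride table. As a worked example, for dims = {8, 3, 2, 384} and rank 4 the numeric side evaluates to block_nd = {8*3*2*384, 3*2*384, 2*384, 384, 1} = {18432, 2304, 768, 384, 1}. The string side (block_nd_s) emits the same products as jit expressions so that dynamic dimensions can be read from the shape_info buffer at run time, with dim_offset mapping a logical index to its 6-d bfwzyx slot.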

@ -180,18 +182,24 @@ KernelsData ScatterNDUpdateKernelRef::GetKernelsData(const Params& params, const
size_t input0_rank = newParams.inputs[0].LogicalDims().size();
size_t input2_rank = newParams.inputs[2].LogicalDims().size();
cldnn_jit.AddConstant(MakeJitConstant("IS_SECOND_ITER", "true"));
cldnn_jit.AddConstant(MakeJitConstant("INPUT0_BLOCK_ND", GetInputBlockND(newParams, 0, input0_rank)));
cldnn_jit.AddConstant(MakeJitConstant("INPUT1_BLOCK_ND", GetInputBlockND(newParams, 1, newParams.indices_rank - 1)));
cldnn_jit.AddConstant(MakeJitConstant("INPUT2_BLOCK_ND", GetInputBlockND(newParams, 2, input2_rank)));
size_t shape_info_offset = 0;
cldnn_jit.AddConstant(MakeJitConstant("INPUT0_BLOCK_ND", GetInputBlockND(newParams, 0, shape_info_offset, input0_rank)));
if (newParams.inputs[0].is_dynamic())
shape_info_offset++;
cldnn_jit.AddConstant(MakeJitConstant("INPUT1_BLOCK_ND", GetInputBlockND(newParams, 1, shape_info_offset, newParams.indices_rank - 1)));
if (newParams.inputs[1].is_dynamic())
shape_info_offset++;
cldnn_jit.AddConstant(MakeJitConstant("INPUT2_BLOCK_ND", GetInputBlockND(newParams, 2, shape_info_offset, input2_rank)));
cldnn_jit.AddConstant(MakeJitConstant("INDICES_RANK", newParams.indices_rank));

const auto& ind_input = newParams.inputs[1];
if (ind_input.is_dynamic()) {
size_t last_idx = newParams.indices_rank - 1;
size_t dim_offset = last_idx < 2 ? last_idx : 5;
size_t input_idx = last_idx < 2 ? ind_input.GetDims().size() - last_idx : 0;
auto dims = ind_input.GetDims();
std::reverse(dims.begin(), dims.end());

auto indices_last_dim = toCodeString(ind_input.GetDims()[input_idx], 6 + dim_offset);
size_t last_idx = newParams.indices_rank - 1;
size_t dim_offset = last_idx < 2 ? last_idx : last_idx + 6 - newParams.indices_rank;
auto indices_last_dim = toCodeString(dims[last_idx], dim_offset + (newParams.inputs[0].is_dynamic() ? 6 : 0));
cldnn_jit.AddConstant(MakeJitConstant("INDICES_LAST_DIM", indices_last_dim));
} else {
cldnn_jit.AddConstant(MakeJitConstant("INDICES_LAST_DIM", dispatchData.indicesLastDim));

@ -25,8 +25,12 @@ ov::intel_gpu::ConvertAvgPoolingToReduce::ConvertAvgPoolingToReduce() {
auto pads_begin = pool->get_pads_begin();
auto pads_end = pool->get_pads_end();

int64_t rank = pool->get_input_partial_shape(0).size();
auto input_shape = pool->get_input_shape(0);
auto input = pool->input_value(0);
const auto input_shape = input.get_partial_shape();
if (input_shape.is_dynamic() || input_shape.rank().is_dynamic()) {
return false;
}
const auto rank = input_shape.rank().get_length();
// Check if input spatial size is same with kernel size.
bool has_same_spatial_size = rank > 2 && std::equal(input_shape.end() - (rank - 2), input_shape.end(), kernel.end() - (rank - 2));
// Check if pads are zeros.
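The early return added in the hunk above is the usual guard for transformations that can only reason about static shapes: get_input_shape would throw on a dynamic input, so the callback now bails out instead. A condensed sketch of the pattern (illustrative):

const auto& ps = node->get_input_partial_shape(0);
if (ps.rank().is_dynamic() || ps.is_dynamic())
    return false;  // skip the rewrite; the shape arithmetic below needs static dims
const auto rank = ps.rank().get_length();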

@ -22,6 +22,18 @@
using namespace cldnn;
using namespace ::tests;

namespace {
template<typename T>
T generate_random_val(int min, int max, int k = 8) {
static std::default_random_engine generator(random_seed);
// 1/k is the resolution of the floating point numbers
std::uniform_int_distribution<int> distribution(k * min, k * max);
T val = (T)distribution(generator);
val /= k;

return val;
}
}
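Hoisting generate_random_val into the anonymous namespace lets both the parameterized fixture below (from which it is removed) and the new standalone dynamic_5d test share it. Its behavior: the integer distribution runs over [k*min, k*max] and the result is divided by k, so generate_random_val<float>(0, 10) draws a value from [0, 10] with a resolution of 1/8 for the default k = 8.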
struct scatter_nd_update_basic_test_params
{

@ -51,17 +63,6 @@ struct scatter_nd_update_random_test : testing::TestWithParam<scatter_nd_update_
return cldnn::format::bfwzyx;
}

template<typename T>
T generate_random_val(int min, int max, int k = 8) {
static std::default_random_engine generator(random_seed);
// 1/k is the resolution of the floating point numbers
std::uniform_int_distribution<int> distribution(k * min, k * max);
T val = (T)distribution(generator);
val /= k;

return val;
}

template <typename T>
std::vector<T> generate_unique_indices(const scatter_nd_update_basic_test_params& p) {
std::set<std::vector<T>> unique_indices;

@ -4460,6 +4461,111 @@ TEST(scatter_nd_update_gpu, dynamic) {
}
}

TEST(scatter_nd_update_gpu, dynamic_5d) {
auto& engine = get_test_engine();

auto input1_layout = layout{{ 8, -1, -1, 384}, data_types::f32, format::bfyx };
auto input2_layout = layout{{-1, -1, -1, -1, -1}, data_types::i32, format::bfzyx };
auto input3_layout = layout{{-1, -1, -1, 384}, data_types::f32, format::bfyx };

topology topology;
topology.add(input_layout("data", input1_layout));
topology.add(input_layout("indices", input2_layout));
topology.add(input_layout("updates", input3_layout));
topology.add(scatter_nd_update("scatter_nd_update", input_info("data"), input_info("indices"), input_info("updates"), 5));

ExecutionConfig config;
config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
network network(engine, topology, config);

auto get_expected_res = [](const std::vector<float>& input,
const std::vector<int32_t>& indices,
const std::vector<float>& updates,
ov::Shape input_shape,
ov::Shape indices_shape,
ov::Shape updates_shape) -> std::vector<float> {
size_t count = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());
auto outputs_ref = std::vector<float>(count);
ngraph::runtime::reference::scatterNdUpdate<float, int32_t>(input.data(),
indices.data(),
updates.data(),
outputs_ref.data(),
input_shape,
indices_shape,
updates_shape);

return outputs_ref;
};

auto generate_unique_indices = [](ov::Shape data_shape, ov::Shape indices_shape) -> std::vector<int32_t>{
std::set<std::vector<int32_t>> unique_indices;
std::vector<int32_t> result;
size_t last_indices_dim = indices_shape.at(indices_shape.size() - 1);

size_t count = std::accumulate(indices_shape.begin(), indices_shape.end(), 1, std::multiplies<size_t>()) / last_indices_dim;

while (unique_indices.size() != count) {
std::vector<int32_t> indices;
for (size_t i = 0; i < last_indices_dim; i++) {
indices.push_back(static_cast<int32_t>(generate_random_val<int>(0, data_shape[i] - 1)));
}

unique_indices.insert(indices);
}

std::for_each(unique_indices.begin(),
unique_indices.end(),
[&](const std::vector<int32_t>& indices) {
result.insert(result.end(), indices.begin(), indices.end());
});

return result;
};

std::vector<std::vector<ov::Shape>> test_shapes = {
{ { 8, 3, 1, 384 }, { 1, 3, 1, 384, 4 }, { 1, 3, 1, 384 } },
{ { 8, 3, 2, 384 }, { 1, 3, 1, 384, 4 }, { 1, 3, 1, 384 } },
};

for (auto& shapes : test_shapes) {
ov::Shape in1_shape = shapes[0];
ov::Shape in2_shape = shapes[1];
ov::Shape in3_shape = shapes[2];
auto input1 = engine.allocate_memory({ in1_shape, data_types::f32, format::bfyx }); // Dictionary
auto input2 = engine.allocate_memory({ in2_shape, data_types::i32, format::bfzyx }); // Indexes
auto input3 = engine.allocate_memory({ in3_shape, data_types::f32, format::bfyx }); // Updates

std::vector<float> input_data = generate_random_1d<float>(input1->count(), 1, 100);
std::vector<int32_t> indices = generate_unique_indices(in1_shape, in2_shape);
std::vector<float> updates = generate_random_1d<float>(input3->count(), 100, 200);
auto expected_res = get_expected_res(input_data, indices, updates, in1_shape, in2_shape, in3_shape);

set_values<float>(input1, input_data);
set_values<int32_t>(input2, indices);
set_values<float>(input3, updates);

network.set_input_data("data", input1);
network.set_input_data("indices", input2);
network.set_input_data("updates", input3);

auto inst = network.get_primitive("scatter_nd_update");
auto impl = inst->get_impl();
ASSERT_TRUE(impl != nullptr);
ASSERT_TRUE(impl->is_dynamic());

auto outputs = network.execute();

auto output = outputs.at("scatter_nd_update").get_memory();
ASSERT_EQ(output->get_layout().get_partial_shape(), input1->get_layout().get_partial_shape());
cldnn::mem_lock<float> output_ptr(output, get_test_stream());

for (size_t i = 0; i < expected_res.size(); ++i) {
ASSERT_EQ(expected_res[i], output_ptr[i]) << " i = " << i;
}
}
}
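The expected outputs above come from the ngraph reference implementation; the semantics being exercised are simple: the output starts as a copy of the data tensor, then each index tuple selects a slice that is overwritten by the corresponding update row. A minimal standalone reference for the k = 1 case (index tuples addressing only the first dimension; an illustrative sketch, not the library code):

#include <cstddef>
#include <cstdint>
#include <vector>

void scatter_nd_update_k1(std::vector<float>& output,          // pre-filled with the input data
                          const std::vector<int32_t>& indices, // n index tuples of size 1
                          const std::vector<float>& updates,   // n rows of row_size elements
                          std::size_t row_size) {
    for (std::size_t n = 0; n < indices.size(); ++n) {
        const std::size_t dst = static_cast<std::size_t>(indices[n]) * row_size;
        for (std::size_t j = 0; j < row_size; ++j)
            output[dst + j] = updates[n * row_size + j];  // overwrite the selected row
    }
}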

#ifdef RUN_ALL_MODEL_CACHING_TESTS
TEST_P(scatter_nd_update_random_test, random_cached)
{

@ -33,7 +33,10 @@ public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params);
inputData = {params.dataTensor.data};
inputData = {params.dataTensor.data,
params.blockShapeTensor.data,
params.cropsBeginTensor.data,
params.cropsEndTensor.data};
refOutData = {params.expectedTensor.data};
}

@ -61,11 +64,12 @@ public:
private:
static std::shared_ptr<Model> CreateFunction(const BatchToSpaceParams& params) {
const auto data = std::make_shared<opset1::Parameter>(params.dataTensor.type, params.dataTensor.shape);
const auto blockShape = std::make_shared<opset1::Constant>(element::i64, params.blockShapeTensor.shape, params.blockShapeTensor.data.data());
const auto cropsBegin = std::make_shared<opset1::Constant>(element::i64, params.cropsBeginTensor.shape, params.cropsBeginTensor.data.data());
const auto cropsEnd = std::make_shared<opset1::Constant>(element::i64, params.cropsEndTensor.shape, params.cropsEndTensor.data.data());
const auto blockShape = std::make_shared<opset1::Parameter>(element::i64, params.blockShapeTensor.shape);
const auto cropsBegin = std::make_shared<opset1::Parameter>(element::i64, params.cropsBeginTensor.shape);
const auto cropsEnd = std::make_shared<opset1::Parameter>(element::i64, params.cropsEndTensor.shape);
const auto batchToSpace = std::make_shared<opset2::BatchToSpace>(data, blockShape, cropsBegin, cropsEnd);
return std::make_shared<Model>(NodeVector {batchToSpace}, ParameterVector {data});
return std::make_shared<Model>(NodeVector{batchToSpace},
ParameterVector{data, blockShape, cropsBegin, cropsEnd});
}
};

@ -34,7 +34,10 @@ public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params);
inputData = {params.dataTensor.data};
inputData = {params.dataTensor.data,
params.blockShapeTensor.data,
params.padsBeginTensor.data,
params.padsEndTensor.data};
refOutData = {params.expectedTensor.data};
}

@ -62,11 +65,12 @@ public:
private:
static std::shared_ptr<Model> CreateFunction(const SpaceToBatchParams& params) {
const auto data = std::make_shared<opset1::Parameter>(params.dataTensor.type, params.dataTensor.shape);
const auto blockShape = std::make_shared<opset1::Constant>(element::i64, params.blockShapeTensor.shape, params.blockShapeTensor.data.data());
const auto padsBegin = std::make_shared<opset1::Constant>(element::i64, params.padsBeginTensor.shape, params.padsBeginTensor.data.data());
const auto padsEnd = std::make_shared<opset1::Constant>(element::i64, params.padsEndTensor.shape, params.padsEndTensor.data.data());
const auto blockShape = std::make_shared<opset1::Parameter>(element::i64, params.blockShapeTensor.shape);
const auto padsBegin = std::make_shared<opset1::Parameter>(element::i64, params.padsBeginTensor.shape);
const auto padsEnd = std::make_shared<opset1::Parameter>(element::i64, params.padsEndTensor.shape);
const auto batchToSpace = std::make_shared<opset2::SpaceToBatch>(data, blockShape, padsBegin, padsEnd);
return std::make_shared<ov::Model>(NodeVector {batchToSpace}, ParameterVector {data});
return std::make_shared<ov::Model>(NodeVector{batchToSpace},
ParameterVector{data, blockShape, padsBegin, padsEnd});
}
};
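In both reference fixtures the block/crops/pads inputs switch from opset1::Constant to opset1::Parameter and are appended to inputData, so the same test vectors now exercise the evaluate path with non-constant shape inputs instead of having them folded at graph-construction time. Note that SetUp must supply the tensors in the same order in which the ParameterVector declares the parameters.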

@ -681,7 +681,7 @@ TEST_P(OVExecutableNetworkBaseTest, precisionsAsInOriginalIR) {
auto filePrefix = CommonTestUtils::generateTestFilePrefix();
const std::string m_out_xml_path_1 = filePrefix + "precisionsAsInOriginalIR.xml";
const std::string m_out_bin_path_1 = filePrefix + "precisionsAsInOriginalIR.bin";
ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_function(function);
ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(function);

ov::CompiledModel execNet;
EXPECT_NO_THROW(execNet = core->compile_model(m_out_xml_path_1, target_device, configuration));

@ -62,7 +62,7 @@ void AddTransformation::SetUp() {
precision, inputShape, param.broadcast,
param.fakeQuantize1, param.fakeQuantize2);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(AddTransformation, CompareWithRefImpl) {

@ -67,7 +67,7 @@ void ElementwiseBranchSelectionTransformation::SetUp() {
param.branch2.fakeQuantizeAfter,
param.fakeQuantizeAfter);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

void ElementwiseBranchSelectionTransformation::Run() {

@ -40,7 +40,7 @@ void FakeQuantizeAndAvgPoolTransformation::SetUp() {
inputShape,
fakeQuantize);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(FakeQuantizeAndAvgPoolTransformation, CompareWithRefImpl) {

@ -39,7 +39,7 @@ void FakeQuantizeAndMaxPoolTransformation::SetUp() {
inputShape,
fakeQuantize);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(FakeQuantizeAndMaxPoolTransformation, CompareWithRefImpl) {

@ -44,7 +44,7 @@ void FakeQuantizePrecisionSelectionTransformation::SetUp() {
testValues.actual.fakeQuantizeOnWeights
});

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(FakeQuantizePrecisionSelectionTransformation, CompareWithRefImpl) {

@ -49,7 +49,7 @@ void FakeQuantizeTransformation::SetUp() {
testParams.fakequantize,
true);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

void FakeQuantizeTransformation::Run() {

@ -39,7 +39,7 @@ void FuseFakeQuantizeAndScaleShiftTransformation::SetUp() {
inputShape,
fakeQuantizeOnData);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(FuseFakeQuantizeAndScaleShiftTransformation, CompareWithRefImpl) {

@ -46,7 +46,7 @@ void FuseFakeQuantizeTransformation::SetUp() {
testValues.actual.precisionAfterDequantization,
testValues.actual.fakeQuantizeOnData);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(FuseFakeQuantizeTransformation, CompareWithRefImpl) {

@ -35,7 +35,7 @@ void FuseMultiplyToFakeQuantizeTransformation::SetUp() {
testValues.actual.fakeQuantizeOnData,
testValues.actual.dequantization);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(FuseMultiplyToFakeQuantizeTransformation, CompareWithRefImpl) {

@ -35,7 +35,7 @@ void FuseSubtractToFakeQuantizeTransformation::SetUp() {
testValues.actual.fakeQuantizeOnData,
testValues.actual.dequantization);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(FuseSubtractToFakeQuantizeTransformation, CompareWithRefImpl) {

@ -71,7 +71,7 @@ void MatMulTransformation::SetUp() {
testValues.inputShape2,
testValues.fqOnData2);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

void MatMulTransformation::Run() {

@ -70,7 +70,7 @@ void MatMulWithConstantTransformation::SetUp() {
testValues.fqOnWeights,
testValues.deqOnWeights);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

void MatMulWithConstantTransformation::Run() {

@ -66,7 +66,7 @@ void MultiplyTransformation::SetUp() {
param.fakeQuantizeAfter,
param.secondInputIsConstant);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

void MultiplyTransformation::Run() {

@ -54,7 +54,7 @@ void PReluTransformation::SetUp() {

function = ngraph::builder::subgraph::PReluFunction::getOriginal(inputShape, precision, testValues.fakeQuantize);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(PReluTransformation, CompareWithRefImpl) {

@ -54,7 +54,7 @@ void ReluTransformation::SetUp() {

function = ngraph::builder::subgraph::ReluFunction::getOriginal(inputShape, precision, testValues.fakeQuantize);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(ReluTransformation, CompareWithRefImpl) {

@ -75,7 +75,7 @@ void SqueezeTransformation::SetUp() {
squeezeParam.fakeQuantize,
squeezeParam.squeezeAxes);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(SqueezeTransformation, CompareWithRefImpl) {

@ -75,7 +75,7 @@ void UnsqueezeTransformation::SetUp() {
unsqueezeParam.fakeQuantize,
unsqueezeParam.unsqueezeAxes);

ov::pass::InitNodeInfo().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
}

TEST_P(UnsqueezeTransformation, CompareWithRefImpl) {

@ -64,8 +64,8 @@ namespace snippets {
"CodegenGelu");

if (useSubgraph) {
ov::pass::InitNodeInfo().run_on_function(function);
ngraph::pass::ConstantFolding().run_on_function(function);
ov::pass::InitNodeInfo().run_on_model(function);
ngraph::pass::ConstantFolding().run_on_model(function);
}
}

@ -417,8 +417,8 @@ void LayerTestsCommon::Infer() {
}

void LayerTestsCommon::ConvertRefsParams() {
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_function(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
}

std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> LayerTestsCommon::CalculateRefs() {

@ -82,7 +82,7 @@ void RandomUniformLayerTest::SetUp() {

void RandomUniformLayerTest::ConvertRefsParams() {
// we shouldn't use default conversion from f16 to f32
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_function(
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_model(
functionRefs);
}

@ -10,6 +10,7 @@ from defusedxml import defuse_stdlib

from utils.conformance_utils import get_logger
from utils import stat_update_utils
from utils.constants import OP_CONFORMANCE, API_CONFORMANCE

# defuse_stdlib provides a patched version of xml.etree.ElementTree which allows using objects from xml.etree.ElementTree
# in a safe manner without including unsafe xml.etree.ElementTree

@ -83,9 +84,11 @@ def aggregate_test_results(aggregated_results: SubElement, xml_reports: list, re
for xml_real_device_entry in xml_results_entry:
aggregated_real_device_api_report = aggregated_results_entry.find(xml_real_device_entry.tag)
if aggregated_real_device_api_report is None:
stat_update_utils.update_rel_values(xml_results_entry)
aggregated_results_entry.append(xml_real_device_entry)
continue
update_result_node(xml_real_device_entry, aggregated_real_device_api_report)
return aggregated_timestamp

@ -95,13 +98,13 @@ def merge_xml(input_folder_paths: list, output_folder_paths: str, output_filenam
summary = Element("report")
results = SubElement(summary, "results")
entity_name = None
if report_type == "OP":
if report_type == OP_CONFORMANCE.lower() or report_type == OP_CONFORMANCE:
entity_name = "ops_list"
elif report_type == "API":
elif report_type == API_CONFORMANCE.lower() or report_type == API_CONFORMANCE:
entity_name = "api_list"
else:
raise Exception(f"Failed to create aggregated report: incorrect report type: {report_type}")

entity_list = SubElement(summary, entity_name)

for folder_path in input_folder_paths:

@ -113,9 +116,9 @@ def merge_xml(input_folder_paths: list, output_folder_paths: str, output_filenam
continue

xml_reports = None
if report_type == "OP":
if report_type == OP_CONFORMANCE.lower() or report_type == OP_CONFORMANCE:
xml_reports = glob.glob(os.path.join(folder_path, 'report_op*.xml'))
elif report_type == "API":
elif report_type == API_CONFORMANCE.lower() or report_type == API_CONFORMANCE:
xml_reports = glob.glob(os.path.join(folder_path, 'report_api*.xml'))
logger.info(f"Num of XML: {len(xml_reports)}")

@ -6,12 +6,12 @@ import xml.etree.ElementTree as ET
from . import conformance_utils

def update_rel_values(xml_node: ET.SubElement):
if xml_node is None:
if xml_node is None or len(xml_node.attrib) == 0:
return
if not "relative_all" in xml_node.attrib:
test_cnt = int(xml_node.attrib.get("passed")) + int(xml_node.attrib.get("failed")) + int(xml_node.attrib.get("skipped")) + \
int(xml_node.attrib.get("crashed")) + int(xml_node.attrib.get("hanged"))
xml_node.set("relative_all", test_cnt)
xml_node.set("relative_all", str(test_cnt))
if not "relative_passed" in xml_node.attrib:
xml_node.set("relative_passed", xml_node.attrib.get("passed"))