Partial revert of a commit with reference implementations of PriorBox(Clustered) ops (#1878)

* partially revert a commit with reference implementations of PriorBox(Clustered); disable the reference implementations for these ops

* ngraph codestyle

* disable const folding unit tests for PriorBox(Clustered)

* fix arm build

* fix unit test

* Revert "fix unit test"

This reverts commit 1fe59e55d6.
Ivan Tikhonov 2020-08-24 14:31:32 +03:00 committed by GitHub
parent e6c371ae2e
commit cb72684388
16 changed files with 1196 additions and 27 deletions
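In short, this revert disables the nGraph reference evaluation of PriorBox and PriorBoxClustered, so ConstantFolding can no longer replace these ops with Constant outputs (plugins do not yet support constants as outputs), and it restores the legacy ConvertPriorBox pass together with the PriorBoxIE/PriorBoxClusteredIE ops so that the subgraphs are lowered to the IE ops instead. A minimal sketch of the disabled evaluation path, distilled from the diff below:

    // nGraph core: the reference implementation is switched off, so constant folding
    // can no longer fold a PriorBox node into a Constant.
    bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs,
                                    const HostTensorVector& inputs) const {
        return false;  // to be re-enabled once plugins support constants as outputs
    }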


@@ -22,6 +22,8 @@
#include "ngraph_ops/pad_ie.hpp"
#include "ngraph_ops/onehot_ie.hpp"
#include "ngraph_ops/power.hpp"
#include "ngraph_ops/prior_box_clustered_ie.hpp"
#include "ngraph_ops/prior_box_ie.hpp"
#include "ngraph_ops/proposal_ie.hpp"
#include "ngraph_ops/relu_ie.hpp"
#include "ngraph_ops/scaleshift.hpp"
@@ -510,15 +512,21 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
addSpecificCreator({"PriorBox"}, [](const std::shared_ptr<::ngraph::Node>& node,
const std::map<std::string, std::string> params) -> CNNLayerPtr {
// Todo (itikhono): replace the message after supporting constants as outputs in plugins
// THROW_IE_EXCEPTION << "PriorBox operation has a form that is not supported." << node->get_friendly_name()
// << " should be replaced by constant during constant folding.";
THROW_IE_EXCEPTION << "PriorBox operation has a form that is not supported." << node->get_friendly_name()
<< " should be replaced by constant during constant folding.";
<< " should be converted to PriorBoxIE operation";
return nullptr;
});
addSpecificCreator({"PriorBoxClustered"}, [](const std::shared_ptr<::ngraph::Node>& node,
const std::map<std::string, std::string> params) -> CNNLayerPtr {
// Todo (itikhono): replace the message after supporting constants as outputs in plugins
// THROW_IE_EXCEPTION << "PriorBoxClustered operation has a form that is not supported." << node->get_friendly_name()
// << " should be replaced by constant during constant folding.";
THROW_IE_EXCEPTION << "PriorBoxClustered operation has a form that is not supported." << node->get_friendly_name()
<< " should be replaced by constant during constant folding.";
<< " should be converted to PriorBoxClusteredIE operation";
return nullptr;
});
@@ -639,6 +647,10 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
std::make_shared<Builder::NodeConverter<::ngraph::op::PadIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::v1::Power>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PowerIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBox>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBoxClustered>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBoxClusteredIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBoxIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::ProposalIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Relu>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::SeluIE>>(),


@@ -25,6 +25,8 @@
#include "ngraph_ops/onehot_ie.hpp"
#include "ngraph_ops/pad_ie.hpp"
#include "ngraph_ops/power.hpp"
#include "ngraph_ops/prior_box_clustered_ie.hpp"
#include "ngraph_ops/prior_box_ie.hpp"
#include "ngraph_ops/proposal_ie.hpp"
#include "ngraph_ops/relu_ie.hpp"
#include "ngraph_ops/selu_ie.hpp"
@@ -1467,6 +1469,136 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ProposalIE>::createLayer(const std::shar
return res;
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBoxClusteredIE>::createLayer(
const std::shared_ptr<ngraph::Node>& layer) const {
LayerParams params = {layer->get_friendly_name(), "PriorBoxClustered",
details::convertPrecision(layer->get_output_element_type(0))};
auto res = std::make_shared<InferenceEngine::CNNLayer>(params);
auto castedLayer = ngraph::as_type_ptr<ngraph::op::PriorBoxClusteredIE>(layer);
if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
auto attr = castedLayer->get_attrs();
std::string param;
for (const auto& val : attr.widths) {
if (!param.empty()) param += ",";
param += asString(val);
}
res->params["width"] = param;
param.clear();
for (const auto& val : attr.heights) {
if (!param.empty()) param += ",";
param += asString(val);
}
res->params["height"] = param;
param.clear();
for (const auto& val : attr.variances) {
if (!param.empty()) param += ",";
param += asString(val);
}
res->params["variance"] = param;
if (std::abs(attr.step_heights - attr.step_widths) < 1e-5) {
res->params["step"] = asString(attr.step_widths);
} else {
res->params["step_w"] = asString(attr.step_widths);
res->params["step_h"] = asString(attr.step_heights);
}
res->params["offset"] = asString(attr.offset);
res->params["clip"] = asString(attr.clip ? 1 : 0);
res->params["flip"] = "1";
return res;
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBoxClustered>::createLayer(
const std::shared_ptr<ngraph::Node>& layer) const {
THROW_IE_EXCEPTION << "PriorBoxClustered operation must be converted to PriorBoxClusteredIE operation.";
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBoxIE>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
LayerParams params = {layer->get_friendly_name(), "PriorBox",
details::convertPrecision(layer->get_output_element_type(0))};
auto res = std::make_shared<InferenceEngine::CNNLayer>(params);
auto castedLayer = ngraph::as_type_ptr<ngraph::op::PriorBoxIE>(layer);
auto layer_info = params.type + " layer " + params.name;
if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << layer_info;
auto attr = castedLayer->get_attrs();
std::string param;
auto data_pshape = castedLayer->get_input_partial_shape(0);
if (data_pshape.is_dynamic()) THROW_IE_EXCEPTION << "Dynamic 0-port input of " << layer_info << " is not supported";
auto data_shape = data_pshape.to_shape();
if (data_shape.size() != 4) THROW_IE_EXCEPTION << layer_info << " has " << data_shape.size() << " items in 0-port input, 4 expected";
auto img_pshape = castedLayer->get_input_partial_shape(1);
if (img_pshape.is_dynamic()) THROW_IE_EXCEPTION << "Dynamic 1-port input of " << layer_info << " is not supported";
auto img_shape = img_pshape.to_shape();
if (img_shape.size() != 4) THROW_IE_EXCEPTION << layer_info << " has " << data_shape.size() << " items in 1-port input, 4 expected";
if (!attr.scale_all_sizes) {
// mxnet-like PriorBox
auto img_H = img_shape[2];
auto data_H = data_shape[2];
if (attr.step == -1)
attr.step = 1. * img_H / data_H;
else
attr.step *= img_H;
for (auto& size : attr.min_size)
size *= img_H;
}
for (const auto& val : attr.max_size) {
if (!param.empty()) param += ",";
param += asString(val);
}
res->params["max_size"] = param;
param.clear();
for (const auto& val : attr.min_size) {
if (!param.empty()) param += ",";
param += asString(val);
}
res->params["min_size"] = param;
param.clear();
for (const auto& val : attr.aspect_ratio) {
if (!param.empty()) param += ",";
param += asString(val);
}
res->params["aspect_ratio"] = param;
param.clear();
for (const auto& val : attr.variance) {
if (!param.empty()) param += ",";
param += asString(val);
}
res->params["variance"] = param;
res->params["step"] = asString(attr.step);
res->params["offset"] = asString(attr.offset);
res->params["clip"] = asString(attr.clip ? 1 : 0);
res->params["flip"] = asString(attr.flip ? 1 : 0);
res->params["scale_all_sizes"] = asString(attr.scale_all_sizes ? 1 : 0);
res->params["density"] = asString(attr.density);
res->params["fixed_size"] = asString(attr.fixed_size);
res->params["fixed_ratio"] = asString(attr.fixed_ratio);
return res;
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBox>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
THROW_IE_EXCEPTION << "PriorBox operation must be converted to PriorBoxIE operation.";
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PowerIE>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
LayerParams params = {layer->get_friendly_name(), "Power",


@@ -0,0 +1,43 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <transformations_visibility.hpp>
#include <ngraph/op/op.hpp>
#include <ngraph/op/prior_box_clustered.hpp>
namespace ngraph {
namespace op {
class TRANSFORMATIONS_API PriorBoxClusteredIE : public Op {
public:
static constexpr NodeTypeInfo type_info{"PriorBoxClusteredIE", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a PriorBoxClusteredIE operation
///
/// \param layer Layer for which prior boxes are computed
/// \param image Input Input to which prior boxes are scaled
/// \param attrs PriorBoxClustered attributes
PriorBoxClusteredIE(const Output<Node>& input,
const Output<Node>& image,
const ngraph::op::PriorBoxClusteredAttrs& attrs);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
const PriorBoxClusteredAttrs& get_attrs() const { return m_attrs; }
private:
PriorBoxClusteredAttrs m_attrs;
};
} // namespace op
} // namespace ngraph


@@ -0,0 +1,41 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <transformations_visibility.hpp>
#include "ngraph/op/op.hpp"
#include "ngraph/op/prior_box.hpp"
namespace ngraph {
namespace op {
class TRANSFORMATIONS_API PriorBoxIE : public Op {
public:
static constexpr NodeTypeInfo type_info{"PriorBoxIE", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a PriorBoxIE operation
///
/// \param layer Layer for which prior boxes are computed
/// \param image Input Input to which prior boxes are scaled
/// \param attrs PriorBox attributes
PriorBoxIE(const Output<Node>& input,
const Output<Node>& image,
const ngraph::op::PriorBoxAttrs& attrs);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
const PriorBoxAttrs& get_attrs() const { return m_attrs; }
private:
PriorBoxAttrs m_attrs;
};
} // namespace op
} // namespace ngraph


@@ -0,0 +1,33 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <memory>
#include <transformations_visibility.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
namespace ngraph {
namespace pass {
class TRANSFORMATIONS_API ConvertPriorBox;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertPriorBox: public ngraph::pass::GraphRewrite {
public:
ConvertPriorBox() : GraphRewrite() {
convert_prior_box();
convert_prior_box_clustered();
}
private:
void convert_prior_box();
void convert_prior_box_clustered();
};


@@ -0,0 +1,39 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/prior_box_clustered_ie.hpp"
#include <memory>
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::PriorBoxClusteredIE::type_info;
op::PriorBoxClusteredIE::PriorBoxClusteredIE(const Output<Node>& input, const Output<Node>& image,
const PriorBoxClusteredAttrs& attrs)
: Op({input, image}), m_attrs(attrs) {
constructor_validate_and_infer_types();
}
void op::PriorBoxClusteredIE::validate_and_infer_types() {
if (get_input_partial_shape(0).is_dynamic() || get_input_partial_shape(1).is_dynamic()) {
set_output_type(0, element::f32, PartialShape::dynamic(3));
return;
}
auto input_shape = get_input_shape(0);
auto image_shape = get_input_shape(1);
size_t num_priors = m_attrs.widths.size();
set_output_type(0, element::f32, Shape {1, 2, 4 * input_shape[2] * input_shape[3] * num_priors});
}
std::shared_ptr<Node> op::PriorBoxClusteredIE::clone_with_new_inputs(const OutputVector& new_args) const {
check_new_args_count(this, new_args);
return make_shared<PriorBoxClusteredIE>(new_args.at(0), new_args.at(1), m_attrs);
}
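For orientation, a brief sketch of how the op defined above is constructed and what output shape it infers; the shapes and attribute values here are illustrative placeholders, not part of the commit:

    // Feature map {1, 768, 30, 30} and image {1, 3, 512, 512}; three clustered priors.
    auto data  = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 768, 30, 30});
    auto image = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 512, 512});

    ngraph::op::PriorBoxClusteredAttrs attrs;
    attrs.widths  = {86.0f, 13.0f, 57.0f};
    attrs.heights = {44.0f, 10.0f, 30.0f};

    auto prior_boxes = std::make_shared<ngraph::op::PriorBoxClusteredIE>(data, image, attrs);
    // Per validate_and_infer_types() above: f32 output of shape {1, 2, 4 * 30 * 30 * 3}.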


@@ -0,0 +1,36 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/prior_box_ie.hpp"
#include <memory>
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::PriorBoxIE::type_info;
op::PriorBoxIE::PriorBoxIE(const Output<Node>& input, const Output<Node>& image, const PriorBoxAttrs& attrs)
: Op({input, image}), m_attrs(attrs) {
constructor_validate_and_infer_types();
}
void op::PriorBoxIE::validate_and_infer_types() {
if (get_input_partial_shape(0).is_dynamic() || get_input_partial_shape(1).is_dynamic()) {
set_output_type(0, element::f32, PartialShape::dynamic(3));
return;
}
auto input_shape = get_input_shape(0);
auto image_shape = get_input_shape(1);
set_output_type(0, element::f32, Shape {
1, 2, 4 * input_shape[2] * input_shape[3] * static_cast<size_t>(op::PriorBox::number_of_priors(m_attrs))});
}
std::shared_ptr<Node> op::PriorBoxIE::clone_with_new_inputs(const OutputVector& new_args) const {
check_new_args_count(this, new_args);
return make_shared<PriorBoxIE>(new_args.at(0), new_args.at(1), m_attrs);
}


@@ -7,6 +7,7 @@
#include "transformations/common_optimizations/algebraic_simplification.hpp"
#include "transformations/common_optimizations/nop_elimination.hpp"
#include "transformations/common_optimizations/common_optimizations.hpp"
#include "transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.hpp"
#include "transformations/depth_to_space_fusion.hpp"
#include "transformations/optimize_strided_slice.hpp"
#include "transformations/convert_scatter_elements_to_scatter.hpp"
@@ -28,6 +29,8 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr<ngraph::
// This pass must be called first in pipeline
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::ConvertPriorBox>(); // WA: ConvertPriorBox must be executed before CF
manager.register_pass<ngraph::pass::ConstantFolding>();
manager.register_pass<ngraph::pass::RemoveFilteringBoxesBySize>(); // Resolves dynamism (replaces NonZero), CF needed
manager.register_pass<ngraph::pass::ConstantFolding>();
manager.register_pass<ngraph::pass::StridedSliceOptimization>(); // depends on CF


@@ -0,0 +1,294 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.hpp"
#include <memory>
#include <vector>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/prior_box_ie.hpp>
#include <ngraph_ops/prior_box_clustered_ie.hpp>
#include <ngraph/rt_info.hpp>
void ngraph::pass::ConvertPriorBox::convert_prior_box() {
auto data = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
auto axes = ngraph::opset1::Constant::create(element::i64, Shape{1}, {0});
auto image = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
ngraph::op::PriorBoxAttrs attr;
attr.min_size = {162.0f};
attr.max_size = {213.0f};
attr.aspect_ratio = {2.0f, 3.0f};
attr.variance = {0.1f, 0.1f, 0.2f, 0.2f};
attr.step = 64.0f;
attr.offset = 0.5f;
attr.clip = 0;
attr.flip = 1;
attr.scale_all_sizes = true;
auto prior_box = std::make_shared<ngraph::opset1::PriorBox>(data, image, attr);
auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze> (prior_box, axes);
ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
auto unsqueeze = std::dynamic_pointer_cast<ngraph::opset1::Unsqueeze> (m.get_match_root());
if (!unsqueeze) {
return false;
}
auto prior_box_node = std::dynamic_pointer_cast<ngraph::opset1::PriorBox> (unsqueeze->input_value(0).get_node_shared_ptr());
if (!prior_box_node) {
return false;
}
// vector of nGraph nodes that will be replaced
ngraph::NodeVector ops_to_replace{unsqueeze, prior_box_node};
std::shared_ptr<Node> input_1(prior_box_node->input_value(0).get_node_shared_ptr());
std::shared_ptr<Node> input_2(prior_box_node->input_value(1).get_node_shared_ptr());
auto convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
auto convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
if (convert1 && convert2) {
ops_to_replace.push_back(convert1);
ops_to_replace.push_back(convert2);
input_1 = convert1->input_value(0).get_node_shared_ptr();
input_2 = convert2->input_value(0).get_node_shared_ptr();
}
auto strided_slice1 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_1);
auto strided_slice2 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_2);
if (!strided_slice1 || !strided_slice2) {
return false;
}
ops_to_replace.push_back(strided_slice1);
ops_to_replace.push_back(strided_slice2);
// Check that StridedSlice1 cuts H,W dims for PriorBox
auto begin = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(1).get_node_shared_ptr());
auto end = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(2).get_node_shared_ptr());
auto stride = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(3).get_node_shared_ptr());
if (!begin || !end || !stride) {
return false;
}
auto begin_val = begin->get_vector<int64_t>();
auto end_val = end->get_vector<int64_t>();
auto stride_val = stride->get_vector<int64_t>();
if (begin_val.size() != 1 && begin_val[0] != 2) {
return false;
}
if (end_val.size() != 1 && end_val[0] != 4) {
return false;
}
if (stride_val.size() != 1 && stride_val[0] != 1) {
return false;
}
// TODO: should we check second StridedSlice?
input_1 = strided_slice1->input_value(0).get_node_shared_ptr();
input_2 = strided_slice2->input_value(0).get_node_shared_ptr();
convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
if (convert1 && convert2) {
ops_to_replace.push_back(convert1);
ops_to_replace.push_back(convert2);
input_1 = convert1->input_value(0).get_node_shared_ptr();
input_2 = convert2->input_value(0).get_node_shared_ptr();
}
// the input can be either ShapeOf-1 or ShapeOf-3
std::shared_ptr<ngraph::op::Op> shape_of1 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_1);
std::shared_ptr<ngraph::op::Op> shape_of2 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_2);
if (!shape_of1 || !shape_of2) {
shape_of1 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_1);
shape_of2 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_2);
}
if (!shape_of1 || !shape_of2) {
return false;
}
// keep this code for a while if will decide to run this transformation again in the opset1->legacy
// the input can be either ShapeOf or Convert(ShapeOf)
// if (!shape_of1 || !shape_of2) {
// auto shapeof1_convert = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
// auto shapeof2_convert = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
// if (!shapeof1_convert || !shapeof2_convert)
// return false;
// shape_of1 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf>(shapeof1_convert->input_value(0).get_node_shared_ptr());
// shape_of2 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf>(shapeof2_convert->input_value(0).get_node_shared_ptr());
// if (!shape_of1 || !shape_of2)
// return false;
// ops_to_replace.push_back(shapeof1_convert);
// ops_to_replace.push_back(shapeof2_convert);
// }
ops_to_replace.push_back(shape_of1);
ops_to_replace.push_back(shape_of2);
auto prior_box_ie = std::make_shared<ngraph::op::PriorBoxIE> (shape_of1->input_value(0),
shape_of2->input_value(0),
prior_box_node->get_attrs());
prior_box_ie->set_friendly_name(unsqueeze->get_friendly_name());
// Nodes in copy runtime info function should be in topological order
std::reverse(ops_to_replace.begin(), ops_to_replace.end());
ngraph::copy_runtime_info(ops_to_replace, prior_box_ie);
ngraph::replace_node(m.get_match_root(), prior_box_ie);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(unsqueeze, "CPUFusion.ConvertPriorBoxToPriorBoxIE");
this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
}
void ngraph::pass::ConvertPriorBox::convert_prior_box_clustered() {
auto data = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
auto axes = ngraph::opset1::Constant::create(element::i64, Shape{1}, {0});
auto image = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
ngraph::op::PriorBoxClusteredAttrs attr;
attr.widths = {0.1f, 0.1f, 0.2f, 0.2f};
attr.heights = {0.1f, 0.1f, 0.2f, 0.2f};
attr.variances = {0.1f, 0.1f, 0.2f, 0.2f};
attr.step_widths = 64.0f;
attr.step_heights = 64.0f;
attr.offset = 0.5f;
attr.clip = false;
auto prior_box = std::make_shared<ngraph::opset1::PriorBoxClustered>(data, image, attr);
auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze> (prior_box, axes);
ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
auto unsqueeze = std::dynamic_pointer_cast<ngraph::opset1::Unsqueeze> (m.get_match_root());
if (!unsqueeze) {
return false;
}
auto prior_box_node = std::dynamic_pointer_cast<ngraph::opset1::PriorBoxClustered>(unsqueeze->input_value(0).get_node_shared_ptr());
if (!prior_box_node) {
return false;
}
// vector of nGraph nodes that will be replaced
ngraph::NodeVector ops_to_replace{unsqueeze, prior_box_node};
std::shared_ptr<Node> input_1(prior_box_node->input_value(0).get_node_shared_ptr());
std::shared_ptr<Node> input_2(prior_box_node->input_value(1).get_node_shared_ptr());
auto convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
auto convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
if (convert1 && convert2) {
ops_to_replace.push_back(convert1);
ops_to_replace.push_back(convert2);
input_1 = convert1->input_value(0).get_node_shared_ptr();
input_2 = convert2->input_value(0).get_node_shared_ptr();
}
auto strided_slice1 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_1);
auto strided_slice2 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_2);
if (!strided_slice1 || !strided_slice2) {
return false;
}
ops_to_replace.push_back(strided_slice1);
ops_to_replace.push_back(strided_slice2);
// Check that StridedSlice1 cuts H,W dims for PriorBox
auto begin = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(1).get_node_shared_ptr());
auto end = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(2).get_node_shared_ptr());
auto stride = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(3).get_node_shared_ptr());
if (!begin || !end || !stride) {
return false;
}
auto begin_val = begin->get_vector<int64_t>();
auto end_val = end->get_vector<int64_t>();
auto stride_val = stride->get_vector<int64_t>();
if (begin_val.size() != 1 && begin_val[0] != 2) {
return false;
}
if (end_val.size() != 1 && end_val[0] != 4) {
return false;
}
if (stride_val.size() != 1 && stride_val[0] != 1) {
return false;
}
// TODO: should we check second StridedSlice?
input_1 = strided_slice1->input_value(0).get_node_shared_ptr();
input_2 = strided_slice2->input_value(0).get_node_shared_ptr();
convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
if (convert1 && convert2) {
ops_to_replace.push_back(convert1);
ops_to_replace.push_back(convert2);
input_1 = convert1->input_value(0).get_node_shared_ptr();
input_2 = convert2->input_value(0).get_node_shared_ptr();
}
// the input can be either ShapeOf-1 or ShapeOf-3
std::shared_ptr<ngraph::op::Op> shape_of1 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_1);
std::shared_ptr<ngraph::op::Op> shape_of2 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_2);
if (!shape_of1 || !shape_of2) {
shape_of1 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_1);
shape_of2 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_2);
}
if (!shape_of1 || !shape_of2) {
return false;
}
// keep this code for a while if will decide to run this transformation again in the opset1->legacy
// the input can be either ShapeOf or Convert(ShapeOf)
// if (!shape_of1 || !shape_of2) {
// auto shapeof1_convert = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
// auto shapeof2_convert = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
// if (!shapeof1_convert || !shapeof2_convert)
// return false;
// shape_of1 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf>(shapeof1_convert->input_value(0).get_node_shared_ptr());
// shape_of2 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf>(shapeof2_convert->input_value(0).get_node_shared_ptr());
// if (!shape_of1 || !shape_of2)
// return false;
// ops_to_replace.push_back(shapeof1_convert);
// ops_to_replace.push_back(shapeof2_convert);
// }
ops_to_replace.push_back(shape_of1);
ops_to_replace.push_back(shape_of2);
auto prior_box_ie = std::make_shared<ngraph::op::PriorBoxClusteredIE> (shape_of1->input_value(0),
shape_of2->input_value(0),
prior_box_node->get_attrs());
prior_box_ie->set_friendly_name(unsqueeze->get_friendly_name());
// Nodes in copy runtime info function should be in topological order
std::reverse(ops_to_replace.begin(), ops_to_replace.end());
ngraph::copy_runtime_info(ops_to_replace, prior_box_ie);
ngraph::replace_node(unsqueeze, prior_box_ie);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(unsqueeze, "CPUFusion.ConvertPriorBoxClusteredToPriorBoxClusteredIE");
this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
}
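A short usage sketch for the pass implemented above, mirroring the registration added to the common optimizations pipeline in this commit (f stands for any std::shared_ptr<ngraph::Function>):

    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::InitNodeInfo>();
    manager.register_pass<ngraph::pass::ConvertPriorBox>();   // must run before constant folding
    manager.register_pass<ngraph::pass::ConstantFolding>();
    manager.run_passes(f);  // PriorBox(Clustered) subgraphs are now PriorBoxIE / PriorBoxClusteredIE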


@@ -4,8 +4,533 @@
#include <string>
#include "ngraph_reader_tests.hpp"
// Todo (itikhono): delete ReadPriorBoxClusteredNetwork and ReadPriorBoxNetwork and replace them with disabled tests
// below after supporting constants as outputs in plugins.
TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
std::string model = R"V0G0N(
<net name="Network" version="10">
<layers>
<layer id="0" name="in1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,768,30,30"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="1" name="in2" type="Parameter" version="opset1">
<data element_type="f32" shape="1,3,512,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="2" name="shape_of1" type="ShapeOf" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="3" name="1344813449_const" type="Const" version="opset1">
<data offset="0" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="4" name="1345813459_const" type="Const" version="opset1">
<data offset="8" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="9" name="13458134591_const" type="Const" version="opset1">
<data offset="16" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="5" name="ss1" type="StridedSlice" version="opset1">
<data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
<input>
<port id="0" precision="I64">
<dim>4</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="6" name="shape_of2" type="ShapeOf" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="7" name="ss2" type="StridedSlice" version="opset1">
<data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
<input>
<port id="0" precision="I64">
<dim>4</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="8" name="prior" type="PriorBoxClustered" version="opset1">
<data clip="0" flip="0" height="44.0,10.0,30.0,19.0,94.0,32.0,61.0,53.0,17.0" offset="0.5" step="16.0" variance="0.1,0.1,0.2,0.2"
width="86.0,13.0,57.0,39.0,68.0,34.0,142.0,50.0,23.0"/>
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>2</dim>
<dim>32400</dim>
</port>
</output>
</layer>
<layer id="12" name="ExpandAxis" type="Const" version="opset1">
<data offset="24" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="11" name="ExpandDims" precision="FP32" type="Unsqueeze" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>32400</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</output>
</layer>
<layer id="10" name="output" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="6" to-port="0"/>
<edge from-layer="2" from-port="1" to-layer="5" to-port="0"/>
<edge from-layer="6" from-port="1" to-layer="7" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="5" to-port="1"/>
<edge from-layer="3" from-port="1" to-layer="7" to-port="1"/>
<edge from-layer="4" from-port="1" to-layer="5" to-port="2"/>
<edge from-layer="4" from-port="1" to-layer="7" to-port="2"/>
<edge from-layer="9" from-port="1" to-layer="5" to-port="3"/>
<edge from-layer="9" from-port="1" to-layer="7" to-port="3"/>
<edge from-layer="5" from-port="4" to-layer="8" to-port="0"/>
<edge from-layer="7" from-port="4" to-layer="8" to-port="1"/>
<edge from-layer="8" from-port="2" to-layer="11" to-port="0"/>
<edge from-layer="12" from-port="0" to-layer="11" to-port="1"/>
<edge from-layer="11" from-port="2" to-layer="10" to-port="0"/>
</edges>
</net>
)V0G0N";
std::string modelV5 = R"V0G0N(
<net name="Network" version="5" precision="FP32" batch="1">
<layers>
<layer id="0" name="in1" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="1" name="in2" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer name="ExpandDims" id="2" type="PriorBoxClustered" precision="FP32">
<data clip="0" step_h="16.000000" step_w="16.000000" flip="1" height="44,10,30,19,94,32,61,53,17" offset="0.500000" step="16.000000" variance="0.1,0.1,0.2,0.2" width="86,13,57,39,68,34,142,50,23" originalLayersNames="ExpandDims,prior,shape_of1,shape_of2,ss1,ss2"/>
<input>
<port id="1">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="3">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="2"/>
</edges>
</net>
)V0G0N";
compareIRs(model, modelV5, 50, [](Blob::Ptr& weights) {
auto* buffer = weights->buffer().as<int64_t*>();
buffer[0] = 2;
buffer[1] = 4;
buffer[2] = 1;
buffer[3] = 0;
});
}
TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
std::string model = R"V0G0N(
<net name="Network" version="10">
<layers>
<layer id="0" name="in1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,768,30,30"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="1" name="in2" type="Parameter" version="opset1">
<data element_type="f32" shape="1,3,512,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="2" name="shape_of1" type="ShapeOf" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="3" name="1344813449_const" type="Const" version="opset1">
<data offset="0" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="4" name="13458134591_const" type="Const" version="opset1">
<data offset="8" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="9" name="1345813459_const" type="Const" version="opset1">
<data offset="16" size="8"/>
<output>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="5" name="ss1" type="StridedSlice" version="opset1">
<data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
<input>
<port id="0" precision="I64">
<dim>4</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="6" name="shape_of2" type="ShapeOf" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="7" name="ss2" type="StridedSlice" version="opset1">
<data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
<input>
<port id="0" precision="I64">
<dim>4</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="8" name="prior" type="PriorBox" version="opset1">
<data density="" fixed_ratio="" fixed_size="" aspect_ratio="2.0,0.5" clip="0" flip="0" img_h="0" img_size="0" img_w="0" max_size="" min_size="0.1,0.141421" offset="0.5" scale_all_sizes="0" step="0.03333333" step_h="0" step_w="0" variance="0.100000,0.100000,0.200000,0.200000"/>
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>2</dim>
<dim>14400</dim>
</port>
</output>
</layer>
<layer id="12" name="ExpandAxis" type="Const" version="opset1">
<data offset="24" size="8"/>
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="11" name="ExpandDims" precision="FP32" type="Unsqueeze" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>14400</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</output>
</layer>
<layer id="10" name="output" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="6" to-port="0"/>
<edge from-layer="2" from-port="1" to-layer="5" to-port="0"/>
<edge from-layer="6" from-port="1" to-layer="7" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="5" to-port="1"/>
<edge from-layer="3" from-port="1" to-layer="7" to-port="1"/>
<edge from-layer="4" from-port="1" to-layer="5" to-port="2"/>
<edge from-layer="4" from-port="1" to-layer="7" to-port="2"/>
<edge from-layer="9" from-port="1" to-layer="5" to-port="3"/>
<edge from-layer="9" from-port="1" to-layer="7" to-port="3"/>
<edge from-layer="5" from-port="4" to-layer="8" to-port="0"/>
<edge from-layer="7" from-port="4" to-layer="8" to-port="1"/>
<edge from-layer="8" from-port="2" to-layer="11" to-port="0"/>
<edge from-layer="12" from-port="0" to-layer="11" to-port="1"/>
<edge from-layer="11" from-port="2" to-layer="10" to-port="0"/>
</edges>
</net>
)V0G0N";
std::string modelV5 = R"V0G0N(
<net name="Network" version="5" precision="FP32" batch="1">
<layers>
<layer id="0" name="in1" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="1" name="in2" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer name="ExpandDims" id="2" type="PriorBox" precision="FP32">
<data density="" fixed_ratio="" fixed_size="" aspect_ratio="2,0.5" clip="0" flip="0" img_h="0" img_size="0" img_w="0" max_size="" min_size="51.200001,72.407555" offset="0.500000" scale_all_sizes="0" step="17.066666666666666" step_h="0" step_w="0" variance="0.1,0.1,0.2,0.2" originalLayersNames="ExpandDims,prior,shape_of1,shape_of2,ss1,ss2"/>
<input>
<port id="1">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="3">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="2"/>
</edges>
</net>
)V0G0N";
compareIRs(model, modelV5, 40, [](Blob::Ptr& weights) {
auto* buffer = weights->buffer().as<int64_t*>();
buffer[0] = 2;
buffer[1] = 4;
buffer[2] = 1;
buffer[3] = 0;
});
}
TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxClusteredNetwork) {
std::string model = R"V0G0N(
<net name="Network" version="10">
<layers>
<layer id="0" name="in1" type="Parameter" version="opset1">
@@ -335,15 +860,15 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
)V0G0N";
compareIRs(model, modelV5, 259200, [](Blob::Ptr& weights) {
auto* buffer = weights->buffer().as<int64_t*>();
buffer[0] = 2;
buffer[1] = 4;
buffer[2] = 1;
buffer[3] = 0;
});
}
- TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
+ TEST_F(NGraphReaderTests, DISABLED_ReadPriorBoxNetwork) {
std::string model = R"V0G0N(
<net name="Network" version="10">
<layers>
@@ -673,10 +1198,10 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
)V0G0N";
compareIRs(model, modelV5, 115200, [](Blob::Ptr& weights) {
auto* buffer = weights->buffer().as<int64_t*>();
buffer[0] = 2;
buffer[1] = 4;
buffer[2] = 1;
buffer[3] = 0;
});
}


@@ -12,5 +12,7 @@ std::vector<std::string> disabledTestPatterns() {
// TODO: FIX BUG 33375
// Disabled due to rare sporadic failures.
".*TransformationTests\\.ConstFoldingPriorBoxClustered.*",
// TODO: task 32568, enable after supporting constants outputs in plugins
".*TransformationTests\\.ConstFoldingPriorBox.*",
};
}


@@ -19,6 +19,7 @@
using namespace testing;
TEST(TransformationTests, ConstFoldingPriorBox) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
{
@@ -110,6 +111,7 @@ TEST(TransformationTests, ConstFoldingPriorBoxClustered) {
}
TEST(TransformationTests, ConstFoldingPriorBoxSubgraph) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
{


@@ -60,8 +60,8 @@ INSTANTIATE_TEST_CASE_P(PriorBoxClustered_Basic, PriorBoxClusteredLayerTest,
::testing::Combine(
layerSpeficParams,
::testing::ValuesIn(netPrecisions),
- ::testing::Values(std::vector<size_t>({ 4, 4 })),
- ::testing::Values(std::vector<size_t>({ 50, 50 })),
+ ::testing::Values(std::vector<size_t>({ 1, 16, 4, 4 })),
+ ::testing::Values(std::vector<size_t>({ 1, 3, 50, 50 })),
::testing::Values(CommonTestUtils::DEVICE_GPU)),
PriorBoxClusteredLayerTest::getTestCaseName
);


@@ -20,6 +20,7 @@
#include "functional_test_utils/layer_test_utils.hpp"
#include "single_layer_tests/prior_box_clustered.hpp"
#include "ngraph_ops/prior_box_clustered_ie.hpp"
namespace LayerTestsDefinitions {
std::string PriorBoxClusteredLayerTest::getTestCaseName(const testing::TestParamInfo<priorBoxClusteredLayerParams>& obj) {
@@ -156,7 +157,9 @@ void PriorBoxClusteredLayerTest::SetUp() {
variances) = specParams;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
- auto params = ngraph::builder::makeParams(ngPrc, { inputShapes, imageShapes });
+ auto paramsIn = ngraph::builder::makeParams(ngPrc, { inputShapes, imageShapes });
+ auto paramsOut = ngraph::helpers::convert2OutputVector(
+ ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramsIn));
ngraph::op::PriorBoxClusteredAttrs attributes;
attributes.widths = widths;
@@ -167,18 +170,16 @@ void PriorBoxClusteredLayerTest::SetUp() {
attributes.offset = offset;
attributes.variances = variances;
- auto shape_of_1 = std::make_shared<ngraph::opset3::ShapeOf>(params[0]);
- auto shape_of_2 = std::make_shared<ngraph::opset3::ShapeOf>(params[1]);
- auto priorBoxClustered = std::make_shared<ngraph::opset3::PriorBoxClustered>(
- shape_of_1,
- shape_of_2,
+ auto priorBoxClustered = std::make_shared<ngraph::op::PriorBoxClusteredIE>(
+ paramsOut[0],
+ paramsOut[1],
attributes);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(priorBoxClustered) };
- function = std::make_shared<ngraph::Function>(results, params, "PB_Clustered");
+ function = std::make_shared<ngraph::Function>(results, paramsIn, "PB_Clustered");
}
- TEST_P(PriorBoxClusteredLayerTest, DISABLED_CompareWithRefs) {
+ TEST_P(PriorBoxClusteredLayerTest, CompareWithRefs) {
Run();
};
} // namespace LayerTestsDefinitions


@@ -214,5 +214,8 @@ bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::PriorBox::evaluate");
- return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
+ return false;
// Todo (itikhono): enable the use of the reference implementation after supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}


@@ -169,5 +169,8 @@ bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::PriorBoxClustered::evaluate");
- return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
+ return false;
// Todo (itikhono): enable the use of the reference implementation after supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}