Revert prior box constant folding (#906)

* Revert "Const folding and reference implementation for PriorBox(Clustered) ops (#785)"

This reverts commit 9fc818478a.

* apply codestyle for ngraph part
This commit is contained in:
Ivan Tikhonov
2020-06-15 12:38:27 +03:00
committed by GitHub
parent dc1ca195dd
commit 26c563132d
21 changed files with 750 additions and 1029 deletions

View File

@@ -24,6 +24,8 @@
#include "ngraph_ops/pad_ie.hpp"
#include "ngraph_ops/onehot_ie.hpp"
#include "ngraph_ops/power.hpp"
#include "ngraph_ops/prior_box_clustered_ie.hpp"
#include "ngraph_ops/prior_box_ie.hpp"
#include "ngraph_ops/proposal_ie.hpp"
#include "ngraph_ops/relu_ie.hpp"
#include "ngraph_ops/scaleshift.hpp"
@@ -472,20 +474,6 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
return res;
});
addSpecificCreator({"PriorBox"}, [](const std::shared_ptr<::ngraph::Node>& node,
const std::map<std::string, std::string> params) -> CNNLayerPtr {
THROW_IE_EXCEPTION << "PriorBox operation has a form that is not supported." << node->get_friendly_name()
<< " should be replaced by constant during constant folding.";
return nullptr;
});
addSpecificCreator({"PriorBoxClustered"}, [](const std::shared_ptr<::ngraph::Node>& node,
const std::map<std::string, std::string> params) -> CNNLayerPtr {
THROW_IE_EXCEPTION << "PriorBoxClustered operation has a form that is not supported." << node->get_friendly_name()
<< " should be replaced by constant during constant folding.";
return nullptr;
});
}
CNNLayerPtr InferenceEngine::details::CNNLayerCreator::create() {
@@ -565,6 +553,10 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
std::make_shared<Builder::NodeConverter<::ngraph::op::PadIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::v1::Power>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PowerIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBox>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBoxClustered>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBoxClusteredIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PriorBoxIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Proposal>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::ProposalIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Relu>>(),
@@ -889,6 +881,7 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
for (const auto &ext : ::ngraph::op::GenericIE::getExtensions(graph)) {
cnnNetworkImpl->AddExtension(ext, nullptr);
}
return cnnNetworkImpl;
}
} // namespace details

View File

@@ -34,6 +34,8 @@
#include "ngraph_ops/onehot_ie.hpp"
#include "ngraph_ops/pad_ie.hpp"
#include "ngraph_ops/power.hpp"
#include "ngraph_ops/prior_box_clustered_ie.hpp"
#include "ngraph_ops/prior_box_ie.hpp"
#include "ngraph_ops/proposal_ie.hpp"
#include "ngraph_ops/relu_ie.hpp"
#include "ngraph_ops/selu_ie.hpp"
@@ -1473,6 +1475,136 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ProposalIE>::createLayer(const std::shar
return res;
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBoxClusteredIE>::createLayer(
    const std::shared_ptr<ngraph::Node>& layer) const {
    // Serializes a PriorBoxClusteredIE node into a legacy "PriorBoxClustered" CNNLayer.
    LayerParams params = {layer->get_friendly_name(), "PriorBoxClustered",
                          details::convertPrecision(layer->get_output_element_type(0))};
    auto res = std::make_shared<InferenceEngine::CNNLayer>(params);

    auto castedLayer = ngraph::as_type_ptr<ngraph::op::PriorBoxClusteredIE>(layer);
    if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;

    auto attr = castedLayer->get_attrs();

    // Joins a list of floats into the comma-separated form the legacy IR expects.
    auto joinFloats = [](const std::vector<float>& values) {
        std::string joined;
        for (const auto& value : values) {
            if (!joined.empty()) joined += ",";
            joined += asString(value);
        }
        return joined;
    };

    res->params["width"] = joinFloats(attr.widths);
    res->params["height"] = joinFloats(attr.heights);
    res->params["variance"] = joinFloats(attr.variances);

    // Collapse step_w/step_h into a single "step" attribute when they coincide.
    if (std::abs(attr.step_heights - attr.step_widths) < 1e-5) {
        res->params["step"] = asString(attr.step_widths);
    } else {
        res->params["step_w"] = asString(attr.step_widths);
        res->params["step_h"] = asString(attr.step_heights);
    }

    res->params["offset"] = asString(attr.offset);
    res->params["clip"] = asString(attr.clip ? 1 : 0);
    // Legacy layer always carries flip=1 for PriorBoxClustered.
    res->params["flip"] = "1";
    return res;
}
// PriorBoxClustered must be lowered to PriorBoxClusteredIE (by the ConvertPriorBox
// pass) before CNNLayer conversion; reaching this converter is a pipeline error.
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBoxClustered>::createLayer(
const std::shared_ptr<ngraph::Node>& layer) const {
THROW_IE_EXCEPTION << "PriorBoxClustered operation must be converted to PriorBoxClusteredIE operation.";
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBoxIE>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
    // Serializes a PriorBoxIE node into a legacy "PriorBox" CNNLayer.
    LayerParams params = {layer->get_friendly_name(), "PriorBox",
                          details::convertPrecision(layer->get_output_element_type(0))};
    auto res = std::make_shared<InferenceEngine::CNNLayer>(params);
    auto castedLayer = ngraph::as_type_ptr<ngraph::op::PriorBoxIE>(layer);
    auto layer_info = params.type + " layer " + params.name;
    if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << layer_info;

    auto attr = castedLayer->get_attrs();
    std::string param;

    // Both inputs must have static 4D shapes; their H/W dims drive the prior grid.
    auto data_pshape = castedLayer->get_input_partial_shape(0);
    if (data_pshape.is_dynamic()) THROW_IE_EXCEPTION << "Dynamic 0-port input of " << layer_info << " is not supported";
    auto data_shape = data_pshape.to_shape();
    if (data_shape.size() != 4) THROW_IE_EXCEPTION << layer_info << " has " << data_shape.size() << " items in 0-port input, 4 expected";

    auto img_pshape = castedLayer->get_input_partial_shape(1);
    if (img_pshape.is_dynamic()) THROW_IE_EXCEPTION << "Dynamic 1-port input of " << layer_info << " is not supported";
    auto img_shape = img_pshape.to_shape();
    // BUGFIX: report the 1-port (image) rank here; this message previously
    // printed data_shape.size() by mistake.
    if (img_shape.size() != 4) THROW_IE_EXCEPTION << layer_info << " has " << img_shape.size() << " items in 1-port input, 4 expected";

    if (!attr.scale_all_sizes) {
        // mxnet-like PriorBox: sizes/step are relative, rescale them by image height.
        auto img_H = img_shape[2];
        auto data_H = data_shape[2];
        if (attr.step == -1)
            attr.step = 1. * img_H / data_H;
        else
            attr.step *= img_H;
        for (auto& size : attr.min_size)
            size *= img_H;
    }

    // Serialize the list attributes as comma-separated strings.
    for (const auto& val : attr.max_size) {
        if (!param.empty()) param += ",";
        param += asString(val);
    }
    res->params["max_size"] = param;

    param.clear();
    for (const auto& val : attr.min_size) {
        if (!param.empty()) param += ",";
        param += asString(val);
    }
    res->params["min_size"] = param;

    param.clear();
    for (const auto& val : attr.aspect_ratio) {
        if (!param.empty()) param += ",";
        param += asString(val);
    }
    res->params["aspect_ratio"] = param;

    param.clear();
    for (const auto& val : attr.variance) {
        if (!param.empty()) param += ",";
        param += asString(val);
    }
    res->params["variance"] = param;

    res->params["step"] = asString(attr.step);
    res->params["offset"] = asString(attr.offset);
    res->params["clip"] = asString(attr.clip ? 1 : 0);
    res->params["flip"] = asString(attr.flip ? 1 : 0);
    res->params["scale_all_sizes"] = asString(attr.scale_all_sizes ? 1 : 0);
    res->params["density"] = asString(attr.density);
    res->params["fixed_size"] = asString(attr.fixed_size);
    res->params["fixed_ratio"] = asString(attr.fixed_ratio);
    return res;
}
// PriorBox must be lowered to PriorBoxIE (by the ConvertPriorBox pass) before
// CNNLayer conversion; reaching this converter is a pipeline error.
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PriorBox>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
THROW_IE_EXCEPTION << "PriorBox operation must be converted to PriorBoxIE operation.";
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::PowerIE>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
LayerParams params = {layer->get_friendly_name(), "Power",

View File

@@ -0,0 +1,43 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <transformations_visibility.hpp>
#include <ngraph/op/op.hpp>
#include <ngraph/op/experimental/layers/prior_box_clustered.hpp>
namespace ngraph {
namespace op {
// IE-internal counterpart of opset PriorBoxClustered. Produced by the
// ConvertPriorBox pass: it consumes the feature-map and image tensors directly
// (their shapes define the prior grid) instead of their ShapeOf results.
class TRANSFORMATIONS_API PriorBoxClusteredIE : public Op {
public:
static constexpr NodeTypeInfo type_info{"PriorBoxClusteredIE", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a PriorBoxClusteredIE operation
///
/// \param input Feature-map tensor for which prior boxes are computed
/// \param image Image tensor to which prior boxes are scaled
/// \param attrs PriorBoxClustered attributes
PriorBoxClusteredIE(const Output<Node>& input,
const Output<Node>& image,
const ngraph::op::PriorBoxClusteredAttrs& attrs);
void validate_and_infer_types() override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
/// \return The attributes this op was constructed with.
const PriorBoxClusteredAttrs& get_attrs() const { return m_attrs; }
private:
// Attributes captured at construction; drive output shape inference.
PriorBoxClusteredAttrs m_attrs;
};
} // namespace op
} // namespace ngraph

View File

@@ -0,0 +1,42 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <transformations_visibility.hpp>
#include "ngraph/op/op.hpp"
#include "ngraph/op/experimental/layers/prior_box.hpp"
namespace ngraph {
namespace op {
// IE-internal counterpart of opset PriorBox. Produced by the ConvertPriorBox
// pass: it consumes the feature-map and image tensors directly (their shapes
// define the prior grid) instead of their ShapeOf results.
class TRANSFORMATIONS_API PriorBoxIE : public Op {
public:
static constexpr NodeTypeInfo type_info{"PriorBoxIE", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a PriorBoxIE operation
///
/// \param input Feature-map tensor for which prior boxes are computed
/// \param image Image tensor to which prior boxes are scaled
/// \param attrs PriorBox attributes
PriorBoxIE(const Output<Node>& input,
const Output<Node>& image,
const ngraph::op::PriorBoxAttrs& attrs);
void validate_and_infer_types() override;
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
/// \return The attributes this op was constructed with.
const PriorBoxAttrs& get_attrs() const { return m_attrs; }
private:
// Attributes captured at construction; drive output shape inference.
PriorBoxAttrs m_attrs;
};
} // namespace op
} // namespace ngraph

View File

@@ -16,6 +16,8 @@
// This pass must be called first in pipeline
NGRAPH_PASS(InitNodeInfo, ::ngraph::pass)
NGRAPH_PASS(ConvertPriorBox, ::ngraph::pass) // WA: ConvertPriorBox must be executed before CF
NGRAPH_PASS(ConstantFolding, ::ngraph::pass)
NGRAPH_PASS(RemoveFilteringBoxesBySize, ::ngraph::pass) // Resolves dynamism (replaces NonZero), CF needed
NGRAPH_PASS(ConstantFolding, ::ngraph::pass)
NGRAPH_PASS(StridedSliceOptimization, ::ngraph::pass) // depends on CF

View File

@@ -0,0 +1,33 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <memory>
#include <transformations_visibility.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
namespace ngraph {
namespace pass {
class TRANSFORMATIONS_API ConvertPriorBox;
} // namespace pass
} // namespace ngraph
// GraphRewrite pass that matches the subgraph
//   ShapeOf -> StridedSlice(H,W) -> [Convert] -> PriorBox(Clustered) -> Unsqueeze
// and replaces it with a single PriorBox(Clustered)IE node fed by the original
// tensors. Must run before constant folding (see the pass pipeline).
class ngraph::pass::ConvertPriorBox: public ngraph::pass::GraphRewrite {
public:
ConvertPriorBox() : GraphRewrite() {
// Register both matchers; each handles one PriorBox flavour.
convert_prior_box();
convert_prior_box_clustered();
}
private:
void convert_prior_box();
void convert_prior_box_clustered();
};

View File

@@ -0,0 +1,39 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/prior_box_clustered_ie.hpp"
#include <memory>
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::PriorBoxClusteredIE::type_info;

// Constructs the op over the feature-map and image tensors and immediately
// runs shape/type inference.
op::PriorBoxClusteredIE::PriorBoxClusteredIE(const Output<Node>& input, const Output<Node>& image,
                                             const PriorBoxClusteredAttrs& attrs)
    : Op({input, image}), m_attrs(attrs) {
    constructor_validate_and_infer_types();
}

void op::PriorBoxClusteredIE::validate_and_infer_types() {
    // With any dynamic input only the output rank (3) is known.
    if (get_input_partial_shape(0).is_dynamic() || get_input_partial_shape(1).is_dynamic()) {
        set_output_type(0, element::f32, PartialShape::dynamic(3));
        return;
    }
    auto input_shape = get_input_shape(0);
    // One prior is generated per configured width/height pair.
    size_t num_priors = m_attrs.widths.size();
    // Output: [1, 2, H * W * num_priors * 4] — box coordinates plus variances.
    // (Removed an unused `image_shape` local; only the 0-port shape matters here.)
    set_output_type(0, element::f32, Shape {1, 2, 4 * input_shape[2] * input_shape[3] * num_priors});
}

shared_ptr<Node> op::PriorBoxClusteredIE::copy_with_new_args(const NodeVector& new_args) const {
    check_new_args_count(this, new_args);
    return make_shared<PriorBoxClusteredIE>(new_args.at(0), new_args.at(1), m_attrs);
}

View File

@@ -0,0 +1,36 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/prior_box_ie.hpp"
#include <memory>
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::PriorBoxIE::type_info;

// Constructs the op over the feature-map and image tensors and immediately
// runs shape/type inference.
op::PriorBoxIE::PriorBoxIE(const Output<Node>& input, const Output<Node>& image, const PriorBoxAttrs& attrs)
    : Op({input, image}), m_attrs(attrs) {
    constructor_validate_and_infer_types();
}

void op::PriorBoxIE::validate_and_infer_types() {
    // With any dynamic input only the output rank (3) is known.
    if (get_input_partial_shape(0).is_dynamic() || get_input_partial_shape(1).is_dynamic()) {
        set_output_type(0, element::f32, PartialShape::dynamic(3));
        return;
    }
    auto input_shape = get_input_shape(0);
    // Output: [1, 2, H * W * num_priors * 4]; the prior count is derived from
    // the attributes by the opset PriorBox helper.
    // (Removed an unused `image_shape` local; only the 0-port shape matters here.)
    set_output_type(0, element::f32, Shape {
        1, 2, 4 * input_shape[2] * input_shape[3] * op::PriorBox::number_of_priors(m_attrs)});
}

shared_ptr<Node> op::PriorBoxIE::copy_with_new_args(const NodeVector& new_args) const {
    check_new_args_count(this, new_args);
    return make_shared<PriorBoxIE>(new_args.at(0), new_args.at(1), m_attrs);
}

View File

@@ -5,6 +5,7 @@
#include <memory>
#include "transformations/common_optimizations/common_optimizations.hpp"
#include "transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.hpp"
#include "transformations/depth_to_space_fusion.hpp"
#include "transformations/optimize_strided_slice.hpp"
#include "transformations/convert_scatter_elements_to_scatter.hpp"

View File

@@ -0,0 +1,294 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.hpp"
#include <memory>
#include <vector>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/prior_box_ie.hpp>
#include <ngraph_ops/prior_box_clustered_ie.hpp>
#include <ngraph/rt_info.hpp>
void ngraph::pass::ConvertPriorBox::convert_prior_box() {
    // Pattern: PriorBox(data, image) -> Unsqueeze(axis 0). The matcher root is the
    // Unsqueeze; attribute values on the pattern nodes are placeholders only.
    auto data = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
    auto axes = ngraph::opset1::Constant::create(element::i64, Shape{1}, {0});
    auto image = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});

    ngraph::op::PriorBoxAttrs attr;
    attr.min_size = {162.0f};
    attr.max_size = {213.0f};
    attr.aspect_ratio = {2.0f, 3.0f};
    attr.variance = {0.1f, 0.1f, 0.2f, 0.2f};
    attr.step = 64.0f;
    attr.offset = 0.5f;
    attr.clip = 0;
    attr.flip = 1;
    attr.scale_all_sizes = true;

    auto prior_box = std::make_shared<ngraph::opset1::PriorBox>(data, image, attr);
    auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze> (prior_box, axes);

    ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
        auto unsqueeze = std::dynamic_pointer_cast<ngraph::opset1::Unsqueeze> (m.get_match_root());
        if (!unsqueeze) {
            return false;
        }
        auto prior_box_node = std::dynamic_pointer_cast<ngraph::opset1::PriorBox> (unsqueeze->input_value(0).get_node_shared_ptr());
        if (!prior_box_node) {
            return false;
        }

        // vector of nGraph nodes that will be replaced
        ngraph::NodeVector ops_to_replace{unsqueeze, prior_box_node};
        std::shared_ptr<Node> input_1(prior_box_node->input_value(0).get_node_shared_ptr());
        std::shared_ptr<Node> input_2(prior_box_node->input_value(1).get_node_shared_ptr());

        // Optional Convert nodes may sit between the PriorBox and the StridedSlices.
        auto convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
        auto convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
        if (convert1 && convert2) {
            ops_to_replace.push_back(convert1);
            ops_to_replace.push_back(convert2);
            input_1 = convert1->input_value(0).get_node_shared_ptr();
            input_2 = convert2->input_value(0).get_node_shared_ptr();
        }

        auto strided_slice1 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_1);
        auto strided_slice2 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_2);
        if (!strided_slice1 || !strided_slice2) {
            return false;
        }
        ops_to_replace.push_back(strided_slice1);
        ops_to_replace.push_back(strided_slice2);

        // Check that StridedSlice1 cuts H,W dims for PriorBox
        auto begin = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(1).get_node_shared_ptr());
        auto end = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(2).get_node_shared_ptr());
        auto stride = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(3).get_node_shared_ptr());
        if (!begin || !end || !stride) {
            return false;
        }

        auto begin_val = begin->get_vector<int64_t>();
        auto end_val = end->get_vector<int64_t>();
        auto stride_val = stride->get_vector<int64_t>();

        // BUGFIX: these checks previously used '&&', which (a) never rejected a
        // 1-element constant holding the wrong value and (b) indexed [0] on
        // non-1-sized (possibly empty) vectors. Require exactly {2}, {4}, {1}.
        if (begin_val.size() != 1 || begin_val[0] != 2) {
            return false;
        }
        if (end_val.size() != 1 || end_val[0] != 4) {
            return false;
        }
        if (stride_val.size() != 1 || stride_val[0] != 1) {
            return false;
        }

        // TODO: should we check second StridedSlice?
        input_1 = strided_slice1->input_value(0).get_node_shared_ptr();
        input_2 = strided_slice2->input_value(0).get_node_shared_ptr();
        convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
        convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
        if (convert1 && convert2) {
            ops_to_replace.push_back(convert1);
            ops_to_replace.push_back(convert2);
            input_1 = convert1->input_value(0).get_node_shared_ptr();
            input_2 = convert2->input_value(0).get_node_shared_ptr();
        }

        // the input can be either ShapeOf-1 or ShapeOf-3
        std::shared_ptr<ngraph::op::Op> shape_of1 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_1);
        std::shared_ptr<ngraph::op::Op> shape_of2 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_2);
        if (!shape_of1 || !shape_of2) {
            shape_of1 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_1);
            shape_of2 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_2);
        }
        if (!shape_of1 || !shape_of2) {
            return false;
        }
        // NB: if this pass is ever moved back to the opset1->legacy pipeline, the
        // inputs may be Convert(ShapeOf); see git history for the matching code.

        ops_to_replace.push_back(shape_of1);
        ops_to_replace.push_back(shape_of2);

        // Feed the IE op with the original tensors whose shapes were taken.
        auto prior_box_ie = std::make_shared<ngraph::op::PriorBoxIE> (shape_of1->input_value(0),
                                                                      shape_of2->input_value(0),
                                                                      prior_box_node->get_attrs());
        prior_box_ie->set_friendly_name(unsqueeze->get_friendly_name());

        // Nodes in copy runtime info function should be in topological order
        std::reverse(ops_to_replace.begin(), ops_to_replace.end());
        ngraph::copy_runtime_info(ops_to_replace, prior_box_ie);
        ngraph::replace_node(m.get_match_root(), prior_box_ie);
        return true;
    };

    auto m = std::make_shared<ngraph::pattern::Matcher>(unsqueeze, "CPUFusion.ConvertPriorBoxToPriorBoxIE");
    this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
}
void ngraph::pass::ConvertPriorBox::convert_prior_box_clustered() {
    // Pattern: PriorBoxClustered(data, image) -> Unsqueeze(axis 0). The matcher
    // root is the Unsqueeze; attribute values on the pattern nodes are placeholders.
    auto data = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
    auto axes = ngraph::opset1::Constant::create(element::i64, Shape{1}, {0});
    auto image = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});

    ngraph::op::PriorBoxClusteredAttrs attr;
    attr.widths = {0.1f, 0.1f, 0.2f, 0.2f};
    attr.heights = {0.1f, 0.1f, 0.2f, 0.2f};
    attr.variances = {0.1f, 0.1f, 0.2f, 0.2f};
    attr.step_widths = 64.0f;
    attr.step_heights = 64.0f;
    attr.offset = 0.5f;
    attr.clip = false;

    auto prior_box = std::make_shared<ngraph::opset1::PriorBoxClustered>(data, image, attr);
    auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze> (prior_box, axes);

    ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
        auto unsqueeze = std::dynamic_pointer_cast<ngraph::opset1::Unsqueeze> (m.get_match_root());
        if (!unsqueeze) {
            return false;
        }
        // Use input_value() rather than the deprecated get_argument(), matching
        // convert_prior_box().
        auto prior_box_node = std::dynamic_pointer_cast<ngraph::opset1::PriorBoxClustered> (unsqueeze->input_value(0).get_node_shared_ptr());
        if (!prior_box_node) {
            return false;
        }

        // vector of nGraph nodes that will be replaced
        ngraph::NodeVector ops_to_replace{unsqueeze, prior_box_node};
        std::shared_ptr<Node> input_1(prior_box_node->input_value(0).get_node_shared_ptr());
        std::shared_ptr<Node> input_2(prior_box_node->input_value(1).get_node_shared_ptr());

        // Optional Convert nodes may sit between the PriorBox and the StridedSlices.
        auto convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
        auto convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
        if (convert1 && convert2) {
            ops_to_replace.push_back(convert1);
            ops_to_replace.push_back(convert2);
            input_1 = convert1->input_value(0).get_node_shared_ptr();
            input_2 = convert2->input_value(0).get_node_shared_ptr();
        }

        auto strided_slice1 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_1);
        auto strided_slice2 = std::dynamic_pointer_cast<ngraph::opset1::StridedSlice> (input_2);
        if (!strided_slice1 || !strided_slice2) {
            return false;
        }
        ops_to_replace.push_back(strided_slice1);
        ops_to_replace.push_back(strided_slice2);

        // Check that StridedSlice1 cuts H,W dims for PriorBoxClustered
        auto begin = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(1).get_node_shared_ptr());
        auto end = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(2).get_node_shared_ptr());
        auto stride = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(3).get_node_shared_ptr());
        if (!begin || !end || !stride) {
            return false;
        }

        auto begin_val = begin->get_vector<int64_t>();
        auto end_val = end->get_vector<int64_t>();
        auto stride_val = stride->get_vector<int64_t>();

        // BUGFIX: these checks previously used '&&', which (a) never rejected a
        // 1-element constant holding the wrong value and (b) indexed [0] on
        // non-1-sized (possibly empty) vectors. Require exactly {2}, {4}, {1}.
        if (begin_val.size() != 1 || begin_val[0] != 2) {
            return false;
        }
        if (end_val.size() != 1 || end_val[0] != 4) {
            return false;
        }
        if (stride_val.size() != 1 || stride_val[0] != 1) {
            return false;
        }

        // TODO: should we check second StridedSlice?
        input_1 = strided_slice1->input_value(0).get_node_shared_ptr();
        input_2 = strided_slice2->input_value(0).get_node_shared_ptr();
        convert1 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_1);
        convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
        if (convert1 && convert2) {
            ops_to_replace.push_back(convert1);
            ops_to_replace.push_back(convert2);
            input_1 = convert1->input_value(0).get_node_shared_ptr();
            input_2 = convert2->input_value(0).get_node_shared_ptr();
        }

        // the input can be either ShapeOf-1 or ShapeOf-3
        std::shared_ptr<ngraph::op::Op> shape_of1 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_1);
        std::shared_ptr<ngraph::op::Op> shape_of2 = std::dynamic_pointer_cast<ngraph::opset1::ShapeOf> (input_2);
        if (!shape_of1 || !shape_of2) {
            shape_of1 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_1);
            shape_of2 = std::dynamic_pointer_cast<ngraph::opset3::ShapeOf>(input_2);
        }
        if (!shape_of1 || !shape_of2) {
            return false;
        }
        // NB: if this pass is ever moved back to the opset1->legacy pipeline, the
        // inputs may be Convert(ShapeOf); see git history for the matching code.

        ops_to_replace.push_back(shape_of1);
        ops_to_replace.push_back(shape_of2);

        // Feed the IE op with the original tensors whose shapes were taken.
        auto prior_box_ie = std::make_shared<ngraph::op::PriorBoxClusteredIE> (shape_of1->input_value(0),
                                                                               shape_of2->input_value(0),
                                                                               prior_box_node->get_attrs());
        prior_box_ie->set_friendly_name(unsqueeze->get_friendly_name());

        // Nodes in copy runtime info function should be in topological order
        std::reverse(ops_to_replace.begin(), ops_to_replace.end());
        ngraph::copy_runtime_info(ops_to_replace, prior_box_ie);
        ngraph::replace_node(unsqueeze, prior_box_ie);
        return true;
    };

    auto m = std::make_shared<ngraph::pattern::Matcher>(unsqueeze, "CPUFusion.ConvertPriorBoxClusteredToPriorBoxClusteredIE");
    this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
}

View File

@@ -30,16 +30,6 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
</port>
</output>
</layer>
<layer id="15" name="in3" type="Parameter" version="opset1">
<data element_type="f32" shape="1,2,32400"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</output>
</layer>
<layer id="2" name="shape_of1" type="ShapeOf" version="opset1">
<input>
<port id="0" precision="FP32">
@@ -182,63 +172,19 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
</port>
</output>
</layer>
<layer name="concat" id="16" type="Concat" version="opset1">
<data axis="1"/>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>32400</dim>
</port>
</output>
</layer>
<layer id="10" name="output" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</input>
</layer>
<layer id="13" name="output_2" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
</layer>
<layer id="14" name="output_3" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="13" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="6" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="14" to-port="0"/>
<edge from-layer="2" from-port="1" to-layer="5" to-port="0"/>
<edge from-layer="6" from-port="1" to-layer="7" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="5" to-port="1"/>
@@ -251,90 +197,66 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
<edge from-layer="7" from-port="4" to-layer="8" to-port="1"/>
<edge from-layer="8" from-port="2" to-layer="11" to-port="0"/>
<edge from-layer="12" from-port="0" to-layer="11" to-port="1"/>
<edge from-layer="11" from-port="2" to-layer="16" to-port="1"/>
<edge from-layer="16" from-port="2" to-layer="10" to-port="0"/>
<edge from-layer="15" from-port="0" to-layer="16" to-port="0"/>
<edge from-layer="11" from-port="2" to-layer="10" to-port="0"/>
</edges>
</net>
)V0G0N";
std::string modelV5 = R"V0G0N(
<net name="Network" version="5" precision="FP32" batch="1">
<layers>
<layer name="in2" type="Input" precision="FP32" id="0">
<data originalLayersNames="in2" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer name="in1" type="Input" precision="FP32" id="1">
<data originalLayersNames="in1" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer name="in3" type="Input" precision="FP32" id="2">
<data originalLayersNames="in3" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</output>
</layer>
<layer name="Constant_49" type="Const" precision="FP32" id="3">
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</output>
<blobs>
<custom offset="0" size="259200" precision="FP32" />
</blobs>
</layer>
<layer name="concat" type="Concat" precision="FP32" id="4">
<data axis="1" originalLayersNames="concat" />
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>32400</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="2" from-port="0" to-layer="4" to-port="0" />
<edge from-layer="3" from-port="0" to-layer="4" to-port="1" />
</edges>
<layers>
<layer id="0" name="in1" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="1" name="in2" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer name="ExpandDims" id="2" type="PriorBoxClustered" precision="FP32">
<data clip="0" step_h="16.000000" step_w="16.000000" flip="1" height="44,10,30,19,94,32,61,53,17" offset="0.500000" step="16.000000" variance="0.1,0.1,0.2,0.2" width="86,13,57,39,68,34,142,50,23" originalLayersNames="ExpandDims,prior,shape_of1,shape_of2,ss1,ss2"/>
<input>
<port id="1">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="3">
<dim>1</dim>
<dim>2</dim>
<dim>32400</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="2"/>
</edges>
</net>
)V0G0N";
compareIRs(model, modelV5, 259200, [](Blob::Ptr& weights) {
compareIRs(model, modelV5, 50, [](Blob::Ptr& weights) {
auto* buffer = weights->buffer().as<int64_t*>();
buffer[0] = 2;
buffer[1] = 4;
@@ -369,16 +291,6 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
</port>
</output>
</layer>
<layer id="15" name="in3" type="Parameter" version="opset1">
<data element_type="f32" shape="1,2,14400"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</output>
</layer>
<layer id="2" name="shape_of1" type="ShapeOf" version="opset1">
<input>
<port id="0" precision="FP32">
@@ -520,63 +432,19 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
</port>
</output>
</layer>
<layer name="concat" id="16" type="Concat" version="opset1">
<data axis="1"/>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>14400</dim>
</port>
</output>
</layer>
<layer id="10" name="output" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</input>
</layer>
<layer id="13" name="output_2" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
</layer>
<layer id="14" name="output_3" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="13" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="6" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="14" to-port="0"/>
<edge from-layer="2" from-port="1" to-layer="5" to-port="0"/>
<edge from-layer="6" from-port="1" to-layer="7" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="5" to-port="1"/>
@@ -589,90 +457,66 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
<edge from-layer="7" from-port="4" to-layer="8" to-port="1"/>
<edge from-layer="8" from-port="2" to-layer="11" to-port="0"/>
<edge from-layer="12" from-port="0" to-layer="11" to-port="1"/>
<edge from-layer="11" from-port="2" to-layer="16" to-port="0"/>
<edge from-layer="15" from-port="0" to-layer="16" to-port="1"/>
<edge from-layer="16" from-port="2" to-layer="10" to-port="0"/>
<edge from-layer="11" from-port="2" to-layer="10" to-port="0"/>
</edges>
</net>
)V0G0N";
std::string modelV5 = R"V0G0N(
<net name="Network" version="5" precision="FP32" batch="1">
<layers>
<layer name="in2" type="Input" precision="FP32" id="0">
<data originalLayersNames="in2" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer name="in1" type="Input" precision="FP32" id="1">
<data originalLayersNames="in1" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer name="Constant_49" type="Const" precision="FP32" id="2">
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</output>
<blobs>
<custom offset="0" size="115200" precision="FP32" />
</blobs>
</layer>
<layer name="in3" type="Input" precision="FP32" id="3">
<data originalLayersNames="in3" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</output>
</layer>
<layer name="concat" type="Concat" precision="FP32" id="4">
<data axis="1" originalLayersNames="concat" />
<input>
<port id="0">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>14400</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="2" from-port="0" to-layer="4" to-port="0" />
<edge from-layer="3" from-port="0" to-layer="4" to-port="1" />
</edges>
<layers>
<layer id="0" name="in1" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="1" name="in2" type="Input" precision="FP32">
<output>
<port id="0">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer name="ExpandDims" id="2" type="PriorBox" precision="FP32">
<data density="" fixed_ratio="" fixed_size="" aspect_ratio="2,0.5" clip="0" flip="0" img_h="0" img_size="0" img_w="0" max_size="" min_size="51.200001,72.407555" offset="0.500000" scale_all_sizes="0" step="17.066666666666666" step_h="0" step_w="0" variance="0.1,0.1,0.2,0.2" originalLayersNames="ExpandDims,prior,shape_of1,shape_of2,ss1,ss2"/>
<input>
<port id="1">
<dim>1</dim>
<dim>768</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>3</dim>
<dim>512</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="3">
<dim>1</dim>
<dim>2</dim>
<dim>14400</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="2"/>
</edges>
</net>
)V0G0N";
compareIRs(model, modelV5, 115200, [](Blob::Ptr& weights) {
compareIRs(model, modelV5, 40, [](Blob::Ptr& weights) {
auto* buffer = weights->buffer().as<int64_t*>();
buffer[0] = 2;
buffer[1] = 4;

View File

@@ -1,218 +0,0 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_common.hpp"
#include <string>
#include <memory>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/function.hpp>
#include <transformations/init_node_info.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/ops.hpp>
#include "ngraph_test_utils.hpp"
using namespace testing;
// Verifies that a PriorBox node whose layer/image shape inputs are Constants
// is folded into a single Constant by ConstantFolding, and that the folded
// values match the hand-computed reference prior box values.
TEST(TransformationTests, ConstFoldingPriorBox) {
    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);

    {
        auto in = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2});

        ngraph::op::PriorBoxAttrs attrs;
        attrs.min_size = {256.0f};
        attrs.max_size = {315.0f};
        attrs.aspect_ratio = {2.0f};
        attrs.flip = true;
        attrs.scale_all_sizes = true;

        auto layer_shape = ngraph::opset3::Constant::create<int64_t>(ngraph::element::i64, ngraph::Shape{2}, {1, 1});
        auto image_shape = ngraph::opset3::Constant::create<int64_t>(ngraph::element::i64, ngraph::Shape{2}, {300, 300});

        auto pb = std::make_shared<ngraph::opset3::PriorBox>(layer_shape, image_shape, attrs);
        auto res = std::make_shared<ngraph::opset3::Result>(pb);
        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{in});
        ngraph::pass::InitNodeInfo().run_on_function(f);
        ngraph::pass::ConstantFolding().run_on_function(f);
        ASSERT_NO_THROW(check_rt_info(f));
    }

    {
        auto layer_shape = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2});
        auto const_prior_box = ngraph::opset3::Constant::create<float>(ngraph::element::f32, ngraph::Shape{2, 16},
                { -0.426667, -0.426667, 0.426667, 0.426667, -0.473286, -0.473286, 0.473286, 0.473286,
                  -0.603398, -0.301699, 0.603398, 0.301699, -0.301699, -0.603398, 0.301699, 0.603398,
                  0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                });
        auto res = std::make_shared<ngraph::opset3::Result>(const_prior_box);
        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{layer_shape});
    }

    auto res = compare_functions(f, f_ref);
    ASSERT_TRUE(res.first) << res.second;

    auto fused = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f->get_result()->input_value(0).get_node_shared_ptr());
    // BUGFIX: the reference constant must come from f_ref; the original code
    // read it from f, so the value check below trivially compared f to itself.
    auto ref = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f_ref->get_result()->input_value(0).get_node_shared_ptr());
    // ASSERT (not EXPECT): dereferencing a null pointer below would crash the binary.
    ASSERT_NE(fused, nullptr);
    ASSERT_NE(ref, nullptr);

    const auto fused_vals = fused->get_vector<float>();
    const auto ref_vals = ref->get_vector<float>();
    ASSERT_EQ(fused_vals.size(), ref_vals.size());
    // The reference literals are rounded to ~6 significant digits, so compare
    // with a small absolute tolerance rather than exact float equality.
    for (size_t i = 0; i < fused_vals.size(); ++i) {
        EXPECT_NEAR(fused_vals[i], ref_vals[i], 1e-5f) << "mismatch at flat index " << i;
    }
}
// Verifies that a PriorBoxClustered node with constant shape inputs is folded
// into a Constant by ConstantFolding, and that the folded values match the
// hand-computed reference values.
TEST(TransformationTests, ConstFoldingPriorBoxClustered) {
    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);

    {
        auto in = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2});

        ngraph::op::PriorBoxClusteredAttrs attrs;
        attrs.widths = {4.0f, 2.0f, 3.2f};
        attrs.heights = {1.0f, 2.0f, 1.1f};

        auto layer_shape = ngraph::opset3::Constant::create<int64_t>(ngraph::element::i64, ngraph::Shape{2}, {2, 2});
        auto image_shape = ngraph::opset3::Constant::create<int64_t>(ngraph::element::i64, ngraph::Shape{2}, {300, 300});

        auto pb = std::make_shared<ngraph::opset3::PriorBoxClustered>(layer_shape, image_shape, attrs);
        auto res = std::make_shared<ngraph::opset3::Result>(pb);
        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{in});
        ngraph::pass::InitNodeInfo().run_on_function(f);
        ngraph::pass::ConstantFolding().run_on_function(f);
        ASSERT_NO_THROW(check_rt_info(f));
    }

    {
        auto layer_shape = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2});
        auto const_prior_box = ngraph::opset3::Constant::create<float>(ngraph::element::f32, ngraph::Shape{2, 48},
                { -0.00666667, -0.00166667, 0.00666667, 0.00166667, -0.00333333, -0.00333333, 0.00333333,
                  0.00333333, -0.00533333, -0.00183333, 0.00533333, 0.00183333, -0.00333333, -0.00166667,
                  0.01, 0.00166667, 0, -0.00333333, 0.00666667, 0.00333333, -0.002, -0.00183333, 0.00866667,
                  0.00183333, -0.00666667, 0.00166667, 0.00666667, 0.005, -0.00333333, 0, 0.00333333,
                  0.00666667, -0.00533333, 0.0015, 0.00533333, 0.00516667, -0.00333333, 0.00166667, 0.01,
                  0.005, 0, 0, 0.00666667, 0.00666667, -0.002, 0.0015, 0.00866667, 0.00516667, 0.1, 0.1,
                  0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                });
        auto res = std::make_shared<ngraph::opset3::Result>(const_prior_box);
        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{layer_shape});
    }

    auto res = compare_functions(f, f_ref);
    ASSERT_TRUE(res.first) << res.second;

    auto fused = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f->get_result()->input_value(0).get_node_shared_ptr());
    // BUGFIX: the reference constant must come from f_ref; the original code
    // read it from f, so the value check below trivially compared f to itself.
    auto ref = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f_ref->get_result()->input_value(0).get_node_shared_ptr());
    // ASSERT (not EXPECT): dereferencing a null pointer below would crash the binary.
    ASSERT_NE(fused, nullptr);
    ASSERT_NE(ref, nullptr);

    const auto fused_vals = fused->get_vector<float>();
    const auto ref_vals = ref->get_vector<float>();
    ASSERT_EQ(fused_vals.size(), ref_vals.size());
    // Reference literals are rounded decimals; compare with a tolerance.
    for (size_t i = 0; i < fused_vals.size(); ++i) {
        EXPECT_NEAR(fused_vals[i], ref_vals[i], 1e-5f) << "mismatch at flat index " << i;
    }
}
// Verifies constant folding of the full ShapeOf -> StridedSlice -> PriorBox
// subgraph: the spatial dims are sliced out of the static input shapes, so the
// whole chain must fold to a single Constant.
TEST(TransformationTests, ConstFoldingPriorBoxSubgraph) {
    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);

    {
        auto in = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2, 3, 1, 1});
        auto in_2 = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2, 3, 300, 300});

        ngraph::op::PriorBoxAttrs attrs;
        attrs.min_size = {256.0f};
        attrs.max_size = {315.0f};
        attrs.aspect_ratio = {2.0f};
        attrs.flip = true;
        attrs.scale_all_sizes = true;

        auto layer_shape = std::make_shared<ngraph::opset3::ShapeOf>(in);
        auto image_shape = std::make_shared<ngraph::opset3::ShapeOf>(in_2);

        // Slice out the spatial dimensions [H, W] (indices 2..4) of each shape.
        auto begin = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {2});
        auto end = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {4});
        auto stride = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
        auto ss_data = std::make_shared<ngraph::opset3::StridedSlice>(layer_shape, begin, end, stride,
                std::vector<int64_t>{0}, std::vector<int64_t>{0});
        auto ss_image = std::make_shared<ngraph::opset3::StridedSlice>(image_shape, begin, end, stride,
                std::vector<int64_t>{0}, std::vector<int64_t>{0});

        auto pb = std::make_shared<ngraph::opset3::PriorBox>(ss_data, ss_image, attrs);
        auto res = std::make_shared<ngraph::opset3::Result>(pb);
        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{in, in_2});
        ngraph::pass::InitNodeInfo().run_on_function(f);
        ngraph::pass::ConstantFolding().run_on_function(f);
        ASSERT_NO_THROW(check_rt_info(f));
    }

    {
        auto layer_shape = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2});
        auto const_prior_box = ngraph::opset3::Constant::create<float>(ngraph::element::f32, ngraph::Shape{2, 16},
                { -0.426667, -0.426667, 0.426667, 0.426667, -0.473286, -0.473286, 0.473286, 0.473286,
                  -0.603398, -0.301699, 0.603398, 0.301699, -0.301699, -0.603398, 0.301699, 0.603398,
                  0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
                });
        auto res = std::make_shared<ngraph::opset3::Result>(const_prior_box);
        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{layer_shape});
    }

    auto res = compare_functions(f, f_ref);
    ASSERT_TRUE(res.first) << res.second;

    auto fused = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f->get_result()->input_value(0).get_node_shared_ptr());
    // BUGFIX: the reference constant must come from f_ref; the original code
    // read it from f, so the value check below trivially compared f to itself.
    auto ref = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f_ref->get_result()->input_value(0).get_node_shared_ptr());
    // ASSERT (not EXPECT): dereferencing a null pointer below would crash the binary.
    ASSERT_NE(fused, nullptr);
    ASSERT_NE(ref, nullptr);

    const auto fused_vals = fused->get_vector<float>();
    const auto ref_vals = ref->get_vector<float>();
    ASSERT_EQ(fused_vals.size(), ref_vals.size());
    // Reference literals are rounded decimals; compare with a tolerance.
    for (size_t i = 0; i < fused_vals.size(); ++i) {
        EXPECT_NEAR(fused_vals[i], ref_vals[i], 1e-5f) << "mismatch at flat index " << i;
    }
}
// Verifies constant folding of the ShapeOf -> StridedSlice -> PriorBoxClustered
// subgraph with static input shapes: the whole chain must fold to a Constant.
TEST(TransformationTests, ConstFoldingPriorBoxClusteredSubgraph) {
    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);

    {
        auto in = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2, 3, 2, 2});
        auto in_2 = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2, 3, 300, 300});

        ngraph::op::PriorBoxClusteredAttrs attrs;
        attrs.widths = {4.0f, 2.0f, 3.2f};
        attrs.heights = {1.0f, 2.0f, 1.1f};

        auto layer_shape = std::make_shared<ngraph::opset3::ShapeOf>(in);
        auto image_shape = std::make_shared<ngraph::opset3::ShapeOf>(in_2);

        // Slice out the spatial dimensions [H, W] (indices 2..4) of each shape.
        auto begin = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {2});
        auto end = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {4});
        auto stride = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
        auto ss_data = std::make_shared<ngraph::opset3::StridedSlice>(layer_shape, begin, end, stride,
                std::vector<int64_t>{0}, std::vector<int64_t>{0});
        auto ss_image = std::make_shared<ngraph::opset3::StridedSlice>(image_shape, begin, end, stride,
                std::vector<int64_t>{0}, std::vector<int64_t>{0});

        auto pb = std::make_shared<ngraph::opset3::PriorBoxClustered>(ss_data, ss_image, attrs);
        auto res = std::make_shared<ngraph::opset3::Result>(pb);
        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{in, in_2});
        ngraph::pass::InitNodeInfo().run_on_function(f);
        ngraph::pass::ConstantFolding().run_on_function(f);
        ASSERT_NO_THROW(check_rt_info(f));
    }

    {
        auto layer_shape = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{2});
        auto const_prior_box = ngraph::opset3::Constant::create<float>(ngraph::element::f32, ngraph::Shape{2, 48},
                { -0.00666667, -0.00166667, 0.00666667, 0.00166667, -0.00333333, -0.00333333, 0.00333333,
                  0.00333333, -0.00533333, -0.00183333, 0.00533333, 0.00183333, -0.00333333, -0.00166667,
                  0.01, 0.00166667, 0, -0.00333333, 0.00666667, 0.00333333, -0.002, -0.00183333, 0.00866667,
                  0.00183333, -0.00666667, 0.00166667, 0.00666667, 0.005, -0.00333333, 0, 0.00333333,
                  0.00666667, -0.00533333, 0.0015, 0.00533333, 0.00516667, -0.00333333, 0.00166667, 0.01,
                  0.005, 0, 0, 0.00666667, 0.00666667, -0.002, 0.0015, 0.00866667, 0.00516667, 0.1, 0.1,
                  0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                });
        auto res = std::make_shared<ngraph::opset3::Result>(const_prior_box);
        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{res}, ngraph::ParameterVector{layer_shape});
    }

    auto res = compare_functions(f, f_ref);
    ASSERT_TRUE(res.first) << res.second;

    auto fused = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f->get_result()->input_value(0).get_node_shared_ptr());
    // BUGFIX: the reference constant must come from f_ref; the original code
    // read it from f, so the value check below trivially compared f to itself.
    auto ref = std::dynamic_pointer_cast<ngraph::opset3::Constant>(f_ref->get_result()->input_value(0).get_node_shared_ptr());
    // ASSERT (not EXPECT): dereferencing a null pointer below would crash the binary.
    ASSERT_NE(fused, nullptr);
    ASSERT_NE(ref, nullptr);

    const auto fused_vals = fused->get_vector<float>();
    const auto ref_vals = ref->get_vector<float>();
    ASSERT_EQ(fused_vals.size(), ref_vals.size());
    // Reference literals are rounded decimals; compare with a tolerance.
    for (size_t i = 0; i < fused_vals.size(); ++i) {
        EXPECT_NEAR(fused_vals[i], ref_vals[i], 1e-5f) << "mismatch at flat index " << i;
    }
}

View File

@@ -60,8 +60,8 @@ INSTANTIATE_TEST_CASE_P(PriorBoxClustered_Basic, PriorBoxClusteredLayerTest,
::testing::Combine(
layerSpeficParams,
::testing::ValuesIn(netPrecisions),
::testing::Values(std::vector<size_t>({ 4, 4 })),
::testing::Values(std::vector<size_t>({ 50, 50 })),
::testing::Values(std::vector<size_t>({ 1, 16, 4, 4 })),
::testing::Values(std::vector<size_t>({ 1, 3, 50, 50 })),
::testing::Values(CommonTestUtils::DEVICE_GPU)),
PriorBoxClusteredLayerTest::getTestCaseName
);

View File

@@ -20,6 +20,7 @@
#include "functional_test_utils/layer_test_utils.hpp"
#include "single_layer_tests/prior_box_clustered.hpp"
#include "ngraph_ops/prior_box_clustered_ie.hpp"
namespace LayerTestsDefinitions {
std::string PriorBoxClusteredLayerTest::getTestCaseName(const testing::TestParamInfo<priorBoxClusteredLayerParams>& obj) {
@@ -156,7 +157,9 @@ void PriorBoxClusteredLayerTest::SetUp() {
variances) = specParams;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, { inputShapes, imageShapes });
auto paramsIn = ngraph::builder::makeParams(ngPrc, { inputShapes, imageShapes });
auto paramsOut = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramsIn));
ngraph::op::PriorBoxClusteredAttrs attributes;
attributes.widths = widths;
@@ -167,18 +170,16 @@ void PriorBoxClusteredLayerTest::SetUp() {
attributes.offset = offset;
attributes.variances = variances;
auto shape_of_1 = std::make_shared<ngraph::opset3::ShapeOf>(params[0]);
auto shape_of_2 = std::make_shared<ngraph::opset3::ShapeOf>(params[1]);
auto priorBoxClustered = std::make_shared<ngraph::opset3::PriorBoxClustered>(
shape_of_1,
shape_of_2,
auto priorBoxClustered = std::make_shared<ngraph::op::PriorBoxClusteredIE>(
paramsOut[0],
paramsOut[1],
attributes);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(priorBoxClustered) };
function = std::make_shared<ngraph::Function>(results, params, "PB_Clustered");
function = std::make_shared<ngraph::Function>(results, paramsIn, "PB_Clustered");
}
TEST_P(PriorBoxClusteredLayerTest, DISABLED_CompareWithRefs) {
TEST_P(PriorBoxClusteredLayerTest, CompareWithRefs) {
Run();
};
} // namespace LayerTestsDefinitions

View File

@@ -15,10 +15,8 @@
//*****************************************************************************
#include "ngraph/op/prior_box.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/prior_box.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
@@ -113,6 +111,7 @@ int64_t op::PriorBox::number_of_priors(const PriorBoxAttrs& attrs)
else
num_priors += total_aspect_ratios * density_2d;
}
return num_priors;
}
@@ -146,54 +145,3 @@ bool op::PriorBox::visit_attributes(AttributeVisitor& visitor)
visitor.on_attribute("attrs.scale_all_sizes", m_attrs.scale_all_sizes);
return true;
}
// File-local helpers used by op::v0::PriorBox::evaluate to run the reference
// PriorBox kernel on host tensors during constant folding.
namespace
{
// Runs the reference kernel for one concrete input element type ET.
// arg0/arg1 hold the layer and image shapes; the result is always written
// as float into `out`, whatever the input element type is.
// NOTE(review): attrs is taken by value here; a const& would avoid a copy
// of the attribute vectors — confirm before changing the signature.
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
op::PriorBoxAttrs attrs)
{
runtime::reference::prior_box(arg0->get_data_ptr<ET>(),
arg1->get_data_ptr<ET>(),
out->get_data_ptr<float>(),
out->get_shape(),
attrs);
return true;
}
// Dispatches on the runtime element type of the shape input. TYPE_CASE is
// presumably an ngraph macro expanding to a `case` label that calls the
// evaluate<ET> template above — defined elsewhere; confirm.
// Returns false for element types without a matching case.
bool evaluate_prior_box(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::PriorBoxAttrs& attrs)
{
bool rc = true;
switch (arg0->get_element_type())
{
TYPE_CASE(i8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i64)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u64)(arg0, arg1, out, attrs);
break;
default: rc = false; break;
}
return rc;
}
}
// Host-side evaluation entry point (used e.g. by constant folding).
// inputs[0] = layer shape, inputs[1] = image shape; outputs[0] receives the
// float prior box data. Returns false if the input element type is unsupported.
bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}

View File

@@ -76,8 +76,6 @@ namespace ngraph
normalized_aspect_ratio(const std::vector<float>& aspect_ratio, bool flip);
const PriorBoxAttrs& get_attrs() const { return m_attrs; }
virtual bool visit_attributes(AttributeVisitor& visitor) override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
private:
PriorBoxAttrs m_attrs;

View File

@@ -15,10 +15,8 @@
//*****************************************************************************
#include "ngraph/op/prior_box_clustered.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/prior_box_clustered.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
@@ -103,55 +101,3 @@ bool op::PriorBoxClustered::visit_attributes(AttributeVisitor& visitor)
visitor.on_attribute("attrs.variances", m_attrs.variances);
return true;
}
// File-local helpers used by op::v0::PriorBoxClustered::evaluate to run the
// reference PriorBoxClustered kernel on host tensors during constant folding.
namespace
{
// Runs the reference kernel for one concrete input element type ET.
// arg0/arg1 hold the layer and image shapes; output is always written as float.
// NOTE(review): attrs is taken by value here; a const& would avoid a copy
// of the widths/heights/variances vectors — confirm before changing.
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
op::PriorBoxClusteredAttrs attrs)
{
runtime::reference::prior_box_clustered(arg0->get_data_ptr<ET>(),
arg1->get_data_ptr<ET>(),
out->get_data_ptr<float>(),
out->get_shape(),
attrs);
return true;
}
// Dispatches on the runtime element type of the shape input. TYPE_CASE is
// presumably an ngraph macro expanding to a `case` label calling evaluate<ET>
// above — defined elsewhere; confirm. Returns false for unsupported types.
// NOTE(review): this helper shares its name with the non-clustered variant in
// prior_box.cpp; both are in anonymous namespaces so there is no ODR clash,
// but a distinct name (evaluate_prior_box_clustered) would be clearer.
bool evaluate_prior_box(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::PriorBoxClusteredAttrs& attrs)
{
bool rc = true;
switch (arg0->get_element_type())
{
TYPE_CASE(i8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(i64)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u8)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u16)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u32)(arg0, arg1, out, attrs);
break;
TYPE_CASE(u64)(arg0, arg1, out, attrs);
break;
default: rc = false; break;
}
return rc;
}
}
// Host-side evaluation entry point (used e.g. by constant folding).
// inputs[0] = layer shape, inputs[1] = image shape; outputs[0] receives the
// float prior box data. Returns false if the input element type is unsupported.
bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs)
{
return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
}

View File

@@ -64,8 +64,6 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& new_args) const override;
const PriorBoxClusteredAttrs& get_attrs() const { return m_attrs; }
virtual bool visit_attributes(AttributeVisitor& visitor) override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
private:
PriorBoxClusteredAttrs m_attrs;

View File

@@ -71,6 +71,7 @@ public:
{
m_cfmap = cfmap;
m_enable_shape_inference = true;
construct_constant_split();
construct_constant_variadic_split();
construct_constant_dyn_broadcast();

View File

@@ -1,296 +0,0 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#include "ngraph/axis_vector.hpp"
#include "ngraph/check.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/op/prior_box.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
// Upper clip: caps x at threshold. Guard-clause form; the comparison is kept
// as `x < threshold` so NaN inputs fall through to threshold, exactly as before.
static inline float clip_great(float x, float threshold)
{
    if (x < threshold)
    {
        return x;
    }
    return threshold;
}
// Lower clip: raises x to at least threshold. Guard-clause form; the
// comparison is kept as `x > threshold` so NaN inputs fall through to
// threshold, exactly as before.
static inline float clip_less(float x, float threshold)
{
    if (x > threshold)
    {
        return x;
    }
    return threshold;
}
// Reference implementation of the PriorBox operation.
// data: 2-element layer shape [H, W]; img: 2-element image shape [IH, IW];
// dst_data: output buffer of shape `out_shape` = {2, H*W*num_priors*4} —
// channel 0 holds box coordinates (xmin, ymin, xmax, ymax per prior),
// channel 1 holds the per-coordinate variances.
// T is the integer element type of the shape inputs; output is always float.
template <typename T>
void prior_box(const T* data,
const T* img,
float* dst_data,
const Shape& out_shape,
const op::PriorBoxAttrs& attrs)
{
// Layer (feature map) and image spatial sizes, read from the shape inputs.
const int64_t W = data[1];
const int64_t H = data[0];
const int64_t IW = img[1];
const int64_t IH = img[0];
// Flattened size of one output channel; OW is fixed at 1 by the output layout.
const int64_t OH = out_shape[1];
const int64_t OW = 1;
// Collect unique aspect ratios, always including 1.0; when attrs.flip is set,
// each ratio also contributes its reciprocal. Near-duplicates (within 1e-6)
// are skipped.
std::vector<float> aspect_ratios = {1.0f};
for (const auto& aspect_ratio : attrs.aspect_ratio)
{
bool exist = false;
for (const auto existed_value : aspect_ratios)
exist |= std::fabs(aspect_ratio - existed_value) < 1e-6;
if (!exist)
{
aspect_ratios.push_back(aspect_ratio);
if (attrs.flip)
{
aspect_ratios.push_back(1.0f / aspect_ratio);
}
}
}
// Variances: must be empty, 1, or 4 values; empty defaults to a single 0.1.
std::vector<float> variance = attrs.variance;
NGRAPH_CHECK(variance.size() == 1 || variance.size() == 4 || variance.empty());
if (variance.empty())
variance.push_back(0.1f);
int64_t num_priors = op::PriorBox::number_of_priors(attrs);
float step = attrs.step;
auto min_size = attrs.min_size;
if (!attrs.scale_all_sizes)
{
// mxnet-like PriorBox
if (step == -1)
step = 1.f * IH / H;
else
step *= IH;
for (auto& size : min_size)
size *= IH;
}
// idx is the running write position into dst_data, advanced by the
// calculate_data lambda (4 floats per emitted box).
int64_t idx = 0;
float center_x, center_y, box_width, box_height, step_x, step_y;
// Reciprocal image dimensions: boxes are emitted in normalized [0,1] coords.
float IWI = 1.0f / static_cast<float>(IW);
float IHI = 1.0f / static_cast<float>(IH);
// step == 0 means "derive the stride from the image/layer size ratio".
if (step == 0)
{
step_x = static_cast<float>(IW) / W;
step_y = static_cast<float>(IH) / H;
}
else
{
step_x = step;
step_y = step;
}
// Emits one box (xmin, ymin, xmax, ymax) at the current write position,
// optionally clipped to [0, 1] via clip_less/clip_great.
auto calculate_data = [&dst_data, &IWI, &IHI, &idx](
float center_x, float center_y, float box_width, float box_height, bool clip) {
if (clip)
{
// order: xmin, ymin, xmax, ymax
dst_data[idx++] = clip_less((center_x - box_width) * IWI, 0);
dst_data[idx++] = clip_less((center_y - box_height) * IHI, 0);
dst_data[idx++] = clip_great((center_x + box_width) * IWI, 1);
dst_data[idx++] = clip_great((center_y + box_height) * IHI, 1);
}
else
{
dst_data[idx++] = (center_x - box_width) * IWI;
dst_data[idx++] = (center_y - box_height) * IHI;
dst_data[idx++] = (center_x + box_width) * IWI;
dst_data[idx++] = (center_y + box_height) * IHI;
}
};
// Main loop: one group of priors per feature-map cell (h, w).
for (int64_t h = 0; h < H; ++h)
{
for (int64_t w = 0; w < W; ++w)
{
// Box center in image pixels for this cell.
if (step == 0)
{
center_x = (w + 0.5f) * step_x;
center_y = (h + 0.5f) * step_y;
}
else
{
center_x = (attrs.offset + w) * step;
center_y = (attrs.offset + h) * step;
}
// Fixed-size priors: a density x density grid of boxes per cell, either
// per fixed_ratio (first branch) or per aspect ratio (else branch).
for (size_t s = 0; s < attrs.fixed_size.size(); ++s)
{
auto fixed_size_ = static_cast<size_t>(attrs.fixed_size[s]);
box_width = box_height = fixed_size_ * 0.5f;
if (!attrs.fixed_ratio.empty())
{
for (float ar : attrs.fixed_ratio)
{
auto density_ = static_cast<int64_t>(attrs.density[s]);
auto shift =
static_cast<int64_t>(attrs.fixed_size[s] / density_);
ar = std::sqrt(ar);
float box_width_ratio = attrs.fixed_size[s] * 0.5f * ar;
float box_height_ratio = attrs.fixed_size[s] * 0.5f / ar;
// NOTE(review): size_t loop variables compared against the
// int64_t density_ (signed/unsigned mix); the sibling branches
// below use int64_t — consider unifying.
for (size_t r = 0; r < density_; ++r)
{
for (size_t c = 0; c < density_; ++c)
{
float center_x_temp = center_x - fixed_size_ / 2 +
shift / 2.f + c * shift;
float center_y_temp = center_y - fixed_size_ / 2 +
shift / 2.f + r * shift;
calculate_data(center_x_temp,
center_y_temp,
box_width_ratio,
box_height_ratio,
true);
}
}
}
}
else
{
if (!attrs.density.empty())
{
auto density_ = static_cast<int64_t>(attrs.density[s]);
auto shift =
static_cast<int64_t>(attrs.fixed_size[s] / density_);
for (int64_t r = 0; r < density_; ++r)
{
for (int64_t c = 0; c < density_; ++c)
{
float center_x_temp = center_x - fixed_size_ / 2 +
shift / 2.f + c * shift;
float center_y_temp = center_y - fixed_size_ / 2 +
shift / 2.f + r * shift;
calculate_data(center_x_temp,
center_y_temp,
box_width,
box_height,
true);
}
}
}
// Rest of priors
for (float ar : aspect_ratios)
{
// Ratio 1.0 was already covered by the square boxes above.
if (fabs(ar - 1.) < 1e-6)
{
continue;
}
auto density_ = static_cast<int64_t>(attrs.density[s]);
auto shift =
static_cast<int64_t>(attrs.fixed_size[s] / density_);
ar = std::sqrt(ar);
float box_width_ratio = attrs.fixed_size[s] * 0.5f * ar;
float box_height_ratio = attrs.fixed_size[s] * 0.5f / ar;
for (int64_t r = 0; r < density_; ++r)
{
for (int64_t c = 0; c < density_; ++c)
{
float center_x_temp = center_x - fixed_size_ / 2 +
shift / 2.f + c * shift;
float center_y_temp = center_y - fixed_size_ / 2 +
shift / 2.f + r * shift;
calculate_data(center_x_temp,
center_y_temp,
box_width_ratio,
box_height_ratio,
true);
}
}
}
}
}
// min_size priors: one square box per min_size, plus an optional
// sqrt(min*max) box, plus one box per non-unit aspect ratio.
for (size_t ms_idx = 0; ms_idx < min_size.size(); ms_idx++)
{
box_width = min_size[ms_idx] * 0.5f;
box_height = min_size[ms_idx] * 0.5f;
calculate_data(center_x, center_y, box_width, box_height, false);
if (attrs.max_size.size() > ms_idx)
{
box_width = box_height =
std::sqrt(min_size[ms_idx] * attrs.max_size[ms_idx]) * 0.5f;
calculate_data(center_x, center_y, box_width, box_height, false);
}
// With scale_all_sizes, aspect-ratio boxes are emitted for every
// min_size; otherwise only once (for the last one, using min_size[0]).
if (attrs.scale_all_sizes ||
(!attrs.scale_all_sizes && (ms_idx == min_size.size() - 1)))
{
size_t s_idx = attrs.scale_all_sizes ? ms_idx : 0;
for (float ar : aspect_ratios)
{
if (std::fabs(ar - 1.0f) < 1e-6)
{
continue;
}
ar = std::sqrt(ar);
box_width = min_size[s_idx] * 0.5f * ar;
box_height = min_size[s_idx] * 0.5f / ar;
calculate_data(
center_x, center_y, box_width, box_height, false);
}
}
}
}
}
// Post-pass clip of all coordinates (including the unclipped min_size
// boxes) to the [0, 1] range.
if (attrs.clip)
{
for (uint64_t i = 0; i < H * W * num_priors * 4; ++i)
{
dst_data[i] = (std::min)((std::max)(dst_data[i], 0.0f), 1.0f);
}
}
// Second output channel: variances. A single variance value is broadcast
// over the whole channel; four values repeat per box coordinate.
uint64_t channel_size = OH * OW;
if (variance.size() == 1)
{
for (uint64_t i = 0; i < channel_size; ++i)
{
dst_data[i + channel_size] = variance[0];
}
}
else
{
for (uint64_t i = 0; i < H * W * num_priors; ++i)
{
for (size_t j = 0; j < 4; ++j)
{
dst_data[i * 4 + j + channel_size] = variance[j];
}
}
}
}
}
}
}

View File

@@ -1,116 +0,0 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#include "ngraph/axis_vector.hpp"
#include "ngraph/check.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/op/prior_box_clustered.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
// Reference implementation of the PriorBoxClustered operation.
// data: 2-element layer shape [H, W]; img: 2-element image shape [IH, IW];
// dst_data: output buffer of shape `out_shape` — first channel holds box
// coordinates (xmin, ymin, xmax, ymax per prior), second channel variances.
// One prior per (cell, width/height pair); T is the integer element type of
// the shape inputs, output is always float.
template <typename T>
void prior_box_clustered(const T* data,
const T* img,
float* dst_data,
const Shape& out_shape,
const op::PriorBoxClusteredAttrs& attrs)
{
// Number of priors per cell equals the number of configured widths
// (assumed equal to heights.size() — TODO confirm upstream validation).
size_t num_priors_ = attrs.widths.size();
// Empty variances default to a single 0.1.
auto variances = attrs.variances;
if (variances.empty())
variances.push_back(0.1f);
// Execute
const int64_t layer_width = data[1];
const int64_t layer_height = data[0];
int64_t img_width = img[1];
int64_t img_height = img[0];
// TODO: Uncomment after PriorBoxClustered is aligned with the specification.
// int img_width = img_w_ == 0 ? img[1] : img_w_;
// int img_height = img_h_ == 0 ? img[0] : img_h_;
// float step_w = attrs.step_widths == 0 ? step_ : attrs.step_widths;
// float step_h = attrs.step_heights == 0 ? step_ :
// attrs.step_heights;
float step_w = attrs.step_widths;
float step_h = attrs.step_heights;
// Both steps zero means "derive the stride from the image/layer size ratio".
if (step_w == 0 && step_h == 0)
{
step_w = static_cast<float>(img_width) / layer_width;
step_h = static_cast<float>(img_height) / layer_height;
}
size_t var_size = variances.size();
// One group of num_priors_ boxes per feature-map cell (h, w).
for (int64_t h = 0; h < layer_height; ++h)
{
for (int64_t w = 0; w < layer_width; ++w)
{
// Box center in image pixels for this cell.
float center_x = (w + attrs.offset) * step_w;
float center_y = (h + attrs.offset) * step_h;
for (size_t s = 0; s < num_priors_; ++s)
{
float box_width = attrs.widths[s];
float box_height = attrs.heights[s];
// Corners normalized by the image size.
float xmin = (center_x - box_width / 2.0f) / img_width;
float ymin = (center_y - box_height / 2.0f) / img_height;
float xmax = (center_x + box_width / 2.0f) / img_width;
float ymax = (center_y + box_height / 2.0f) / img_height;
if (attrs.clip)
{
// Clamp each coordinate to [0, 1].
xmin = (std::min)((std::max)(xmin, 0.0f), 1.0f);
ymin = (std::min)((std::max)(ymin, 0.0f), 1.0f);
xmax = (std::min)((std::max)(xmax, 0.0f), 1.0f);
ymax = (std::min)((std::max)(ymax, 0.0f), 1.0f);
}
// Flat offset of this prior's first value when `cnt` values are
// stored per prior (4 coordinates, or var_size variances).
auto get_idx = [&](uint64_t cnt) -> uint64_t {
return h * layer_width * num_priors_ * cnt + w * num_priors_ * cnt +
s * cnt;
};
uint64_t idx = get_idx(4);
dst_data[idx + 0] = xmin;
dst_data[idx + 1] = ymin;
dst_data[idx + 2] = xmax;
dst_data[idx + 3] = ymax;
// Variances go into the second output channel, offset by the
// first channel's size out_shape[1].
idx = get_idx(var_size);
for (size_t j = 0; j < var_size; j++)
dst_data[idx + j + out_shape[1]] = variances[j];
}
}
}
}
}
}
}