Add support for custom onnx operators: DetectionOutput, Normalize and… (#2064)

Mateusz Tabaka 2020-09-10 11:27:12 +02:00 committed by GitHub
parent 362080b5be
commit 926be83568
28 changed files with 945 additions and 27 deletions

View File

@ -27,6 +27,8 @@ namespace ngraph
{
namespace onnx_import
{
std::string get_node_domain(const ONNX_NAMESPACE::NodeProto& node_proto);
class Model
{
public:

View File

@ -0,0 +1,38 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
OutputVector detection_output(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -0,0 +1,37 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
OutputVector normalize(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -0,0 +1,38 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
OutputVector prior_box(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -129,6 +129,8 @@ namespace ngraph
const std::string& domain);
};
const std::string OPENVINO_ONNX_DOMAIN = "org.openvinotoolkit";
} // namespace onnx_import
} // namespace ngraph

View File

@ -46,11 +46,6 @@ namespace ngraph
return result;
}
static std::string get_node_domain(const ONNX_NAMESPACE::NodeProto& node_proto)
{
return (node_proto.domain().empty() ? "" : node_proto.domain());
}
/// \brief Gets the operator represented by the provided node's unique identifier.
///
/// \param[in] node_proto The node protobuf representation object.
@ -142,7 +137,7 @@ namespace ngraph
node_proto);
// If a node from an unregistered domain is detected, try registering that
// domain
m_model->enable_opset_domain(detail::get_node_domain(node_proto));
m_model->enable_opset_domain(get_node_domain(node_proto));
}
}

View File

@ -24,6 +24,11 @@ namespace ngraph
{
namespace onnx_import
{
std::string get_node_domain(const ONNX_NAMESPACE::NodeProto& node_proto)
{
return node_proto.has_domain() ? node_proto.domain() : "";
}
Model::Model(const ONNX_NAMESPACE::ModelProto& model_proto)
: m_model_proto{&model_proto}
{
@ -32,9 +37,8 @@ namespace ngraph
// unknown or invalid.
for (const auto& id : m_model_proto->opset_import())
{
m_opset.emplace(id.domain(),
OperatorsBridge::get_operator_set(
(id.domain() == "ai.onnx" ? "" : id.domain()), id.version()));
auto domain = id.has_domain() ? id.domain() : "";
m_opset.emplace(domain, OperatorsBridge::get_operator_set(domain, id.version()));
}
// onnx.proto(.3): the empty string ("") for domain or absence of opset_import field
// implies the operator set that is defined as part of the ONNX specification.
@ -63,7 +67,7 @@ namespace ngraph
bool Model::is_operator_available(const ONNX_NAMESPACE::NodeProto& node_proto) const
{
const auto dm = m_opset.find(node_proto.domain());
const auto dm = m_opset.find(get_node_domain(node_proto));
if (dm == std::end(m_opset))
{
return false;

View File

@ -33,6 +33,8 @@ namespace ngraph
Impl(const ONNX_NAMESPACE::NodeProto& node_proto, const Graph& graph)
: m_node_proto{&node_proto}
, m_name{node_proto.has_name() ? node_proto.name() : ""}
, m_domain{get_node_domain(node_proto)}
, m_graph{&graph}
, m_attributes{std::begin(node_proto.attribute()), std::end(node_proto.attribute())}
, m_output_names{std::begin(node_proto.output()), std::end(node_proto.output())}
@ -65,6 +67,8 @@ namespace ngraph
private:
const ONNX_NAMESPACE::NodeProto* m_node_proto;
std::string m_name;
std::string m_domain;
const Graph* m_graph;
std::vector<Attribute> m_attributes;
std::vector<std::reference_wrapper<const std::string>> m_output_names;
@ -74,9 +78,9 @@ namespace ngraph
const ONNX_NAMESPACE::NodeProto& Node::Impl::node_proto() const { return *m_node_proto; }
const Graph& Node::Impl::graph() const { return *m_graph; }
const std::vector<Attribute>& Node::Impl::attributes() const { return m_attributes; }
const std::string& Node::Impl::domain() const { return m_node_proto->domain(); }
const std::string& Node::Impl::domain() const { return m_domain; }
const std::string& Node::Impl::op_type() const { return m_node_proto->op_type(); }
const std::string& Node::Impl::name() const { return m_node_proto->name(); }
const std::string& Node::Impl::name() const { return m_name; }
const std::vector<std::reference_wrapper<const std::string>>&
Node::Impl::get_output_names() const
{

View File

@ -73,6 +73,29 @@ namespace ngraph
} // namespace error
static const std::vector<std::string> legacy_ops_to_fixup = {
"FakeQuantize", "DetectionOutput", "Normalize", "PriorBox"};
// There are some models with the custom ops listed above that have the default domain set.
// In order to load such models, we need to overwrite those ops' domain with the domain
// they are registered under.
void fixup_legacy_operators(ONNX_NAMESPACE::GraphProto* graph_proto)
{
for (auto& node : *graph_proto->mutable_node())
{
auto it = std::find(
legacy_ops_to_fixup.begin(), legacy_ops_to_fixup.end(), node.op_type());
if (it != legacy_ops_to_fixup.end())
{
if (!node.has_domain() || node.domain().empty() ||
node.domain() == "ai.onnx")
{
node.set_domain(OPENVINO_ONNX_DOMAIN);
}
}
}
}
std::shared_ptr<Function>
convert_to_ng_function(const ONNX_NAMESPACE::ModelProto& model_proto)
{
@ -119,6 +142,9 @@ namespace ngraph
}
#endif
}
detail::fixup_legacy_operators(model_proto.mutable_graph());
return detail::convert_to_ng_function(model_proto);
}
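For orientation, a minimal sketch of what this pass does to a legacy node. It is a hypothetical standalone example: the include path is an assumption, and fixup_legacy_operators is only visible inside this translation unit, so the call is shown commented out.

#include <onnx/onnx_pb.h> // assumed ONNX protobuf header

// Hypothetical illustration of detail::fixup_legacy_operators from the hunk above.
void fixup_legacy_domain_example()
{
    ONNX_NAMESPACE::GraphProto graph;
    ONNX_NAMESPACE::NodeProto* node = graph.add_node();
    node->set_op_type("DetectionOutput"); // legacy model: domain left at the default ("")
    // detail::fixup_legacy_operators(&graph); // not callable outside onnx.cpp
    // After the call, node->domain() would be "org.openvinotoolkit", so
    // is_operator_available() resolves the op from the set registered under
    // OPENVINO_ONNX_DOMAIN instead of failing for the default domain.
}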

View File

@ -0,0 +1,104 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/detection_output.hpp"
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
#include "onnx_import/default_opset.hpp"
#include "onnx_import/op/org.openvinotoolkit/detection_output.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
OutputVector detection_output(const Node& node)
{
auto inputs = node.get_ng_inputs();
auto box_logits = inputs[0];
auto class_preds = inputs[1];
auto proposals = inputs[2];
ngraph::op::DetectionOutputAttrs attrs;
attrs.num_classes = node.get_attribute_value<int64_t>("num_classes");
attrs.background_label_id =
node.get_attribute_value<int64_t>("background_label_id", 0);
attrs.top_k = node.get_attribute_value<int64_t>("top_k", -1);
attrs.variance_encoded_in_target =
node.get_attribute_value<int64_t>("variance_encoded_in_target", 0);
// The spec says keep_top_k is an array of ints, but some models use a single int;
// mkldnn also expects a single integer.
attrs.keep_top_k = {
static_cast<int>(node.get_attribute_value<int64_t>("keep_top_k", 1))};
auto code_type = node.get_attribute_value<std::string>(
"code_type", std::string{"caffe.PriorBoxParameter.CORNER"});
// Possible values are "caffe.PriorBoxParameter.CENTER_SIZE" and
// "caffe.PriorBoxParameter.CORNER", but some models carry just the bare
// "CENTER_SIZE" or "CORNER" strings, so handle that case as well.
if (code_type.find("caffe.PriorBoxParameter.") == std::string::npos)
{
code_type = "caffe.PriorBoxParameter." + code_type;
}
attrs.code_type = code_type;
attrs.share_location = node.get_attribute_value<int64_t>("share_location", 1);
attrs.nms_threshold = node.get_attribute_value<float>("nms_threshold");
attrs.confidence_threshold =
node.get_attribute_value<float>("confidence_threshold", 0);
attrs.clip_after_nms = node.get_attribute_value<int64_t>("clip_after_nms", 0);
attrs.clip_before_nms = node.get_attribute_value<int64_t>("clip_before_nms", 0);
attrs.decrease_label_id =
node.get_attribute_value<int64_t>("decrease_label_id", 0);
// TODO: per spec, normalized by default should be 0, but in MO it's 1.
attrs.normalized = node.get_attribute_value<int64_t>("normalized", 1);
attrs.input_width = node.get_attribute_value<int64_t>("input_width", 1);
attrs.input_height = node.get_attribute_value<int64_t>("input_height", 1);
attrs.objectness_score = node.get_attribute_value<float>("objectness_score", 0);
if (inputs.size() == 3)
{
return {std::make_shared<default_opset::DetectionOutput>(
box_logits, class_preds, proposals, attrs)};
}
else if (inputs.size() == 5)
{
auto aux_class_preds = inputs[3];
auto aux_box_preds = inputs[4];
return {std::make_shared<default_opset::DetectionOutput>(box_logits,
class_preds,
proposals,
aux_class_preds,
aux_box_preds,
attrs)};
}
else
{
NGRAPH_CHECK(false, "Invalid number of inputs");
}
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -16,8 +16,8 @@
#include <memory>
#include "fake_quantize.hpp"
#include "onnx_import/default_opset.hpp"
#include "onnx_import/op/org.openvinotoolkit/fake_quantize.hpp"
namespace ngraph
{

View File

@ -0,0 +1,96 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "onnx_import/op/org.openvinotoolkit/normalize.hpp"
#include "ngraph/op/normalize_l2.hpp"
#include "onnx_import/default_opset.hpp"
#include "onnx_import/utils/common.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
OutputVector normalize(const Node& node)
{
auto inputs = node.get_ng_inputs();
NGRAPH_CHECK(inputs.size() == 2, "Invalid number of inputs");
auto data = inputs[0];
float eps = node.get_attribute_value<float>("eps", 0);
int64_t across_spatial = node.get_attribute_value<int64_t>("across_spatial", 0);
int64_t channel_shared = node.get_attribute_value<int64_t>("channel_shared", 0);
std::shared_ptr<ngraph::Node> weights;
if (channel_shared)
{
NGRAPH_CHECK(
ngraph::op::is_constant(inputs[1].get_node()),
"Weights input must be a constant if channel_shared is set to 1");
const auto& shape = inputs[1].get_partial_shape();
NGRAPH_CHECK(
shape.is_static() && shape.rank().get_length() == 1,
"Weights rank must be equal to 1 if channel_shared is set to 1");
weights = inputs[1].get_node_shared_ptr();
}
else
{
std::vector<int64_t> weights_shape{1};
const auto& data_shape = inputs[0].get_partial_shape();
if (data_shape[1].is_static())
{
weights_shape.push_back(data_shape[1].get_length());
}
else
{
weights_shape.push_back(0);
}
for (size_t i = 2; i < data_shape.rank().get_length(); ++i)
{
weights_shape.push_back(1);
}
auto new_shape = std::make_shared<default_opset::Constant>(
element::i64, Shape{weights_shape.size()}, weights_shape);
weights =
std::make_shared<default_opset::Reshape>(inputs[1], new_shape, true);
}
std::shared_ptr<ngraph::Node> axes;
if (!across_spatial)
{
axes = std::make_shared<default_opset::Constant>(
element::i64, Shape{1}, std::vector<int64_t>{1});
}
else
{
axes = common::get_monotonic_range_along_node_rank(data, 1);
}
return {std::make_shared<default_opset::Multiply>(
std::make_shared<default_opset::NormalizeL2>(
data, axes, eps, ngraph::op::EpsMode::ADD),
weights)};
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
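In effect, for the attribute values used in the test below (across_spatial == 0, channel_shared == 0), this subgraph computes, for each sample n and spatial position (h, w):

    out[n][c][h][w] = data[n][c][h][w] / sqrt(sum_over_c(data[n][c][h][w]^2) + eps) * weights[c]

that is, an L2 normalization across the channel axis followed by a per-channel scale.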

View File

@ -0,0 +1,92 @@
//*****************************************************************************
// Copyright 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/prior_box.hpp"
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
#include "onnx_import/default_opset.hpp"
#include "onnx_import/op/org.openvinotoolkit/prior_box.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace detail
{
namespace
{
std::shared_ptr<default_opset::StridedSlice>
make_slice(std::shared_ptr<ngraph::Node> node, int64_t start, int64_t end)
{
return std::make_shared<default_opset::StridedSlice>(
node,
default_opset::Constant::create(
element::i64, Shape{1}, std::vector<int64_t>{start}),
default_opset::Constant::create(
element::i64, Shape{1}, std::vector<int64_t>{end}),
std::vector<int64_t>{0}, // begin mask
std::vector<int64_t>{0}); // end mask
}
}
} // namespace detail
namespace set_1
{
OutputVector prior_box(const Node& node)
{
auto inputs = node.get_ng_inputs();
NGRAPH_CHECK(inputs.size() == 2, "Invalid number of inputs");
auto output_shape = std::make_shared<default_opset::ShapeOf>(inputs[0]);
auto image_shape = std::make_shared<default_opset::ShapeOf>(inputs[1]);
auto output_shape_slice = detail::make_slice(output_shape, 2, 4);
auto image_shape_slice = detail::make_slice(image_shape, 2, 4);
ngraph::op::PriorBoxAttrs attrs;
attrs.min_size = node.get_attribute_value<std::vector<float>>("min_size", {});
attrs.max_size = node.get_attribute_value<std::vector<float>>("max_size", {});
attrs.aspect_ratio =
node.get_attribute_value<std::vector<float>>("aspect_ratio", {});
attrs.flip = node.get_attribute_value<int64_t>("flip", 0);
attrs.clip = node.get_attribute_value<int64_t>("clip", 0);
attrs.step = node.get_attribute_value<float>("step", 0);
attrs.offset = node.get_attribute_value<float>("offset", 0);
attrs.variance = node.get_attribute_value<std::vector<float>>("variance", {});
attrs.scale_all_sizes = node.get_attribute_value<int64_t>("scale_all_sizes", 1);
attrs.fixed_ratio =
node.get_attribute_value<std::vector<float>>("fixed_ratio", {});
attrs.fixed_size =
node.get_attribute_value<std::vector<float>>("fixed_size", {});
attrs.density = node.get_attribute_value<std::vector<float>>("density", {});
auto axes = default_opset::Constant::create(
element::i64, Shape{1}, std::vector<int64_t>{0});
return {std::make_shared<default_opset::Unsqueeze>(
std::make_shared<default_opset::PriorBox>(
output_shape_slice, image_shape_slice, attrs),
axes)};
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
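A brief note on the construction above: the two StridedSlice helpers pick dimensions [2, 4) (the spatial H and W) out of the 4-D input shapes, PriorBox then emits a rank-2 tensor holding box coordinates and variances, and the final Unsqueeze along axis 0 produces the {1, 2, N} layout expected by the models (and seen in the onnx_prior_box test below).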

View File

@ -57,7 +57,6 @@
#include "onnx_import/op/exp.hpp"
#include "onnx_import/op/expand.hpp"
#include "onnx_import/op/eye_like.hpp"
#include "onnx_import/op/fake_quantize.hpp"
#include "onnx_import/op/flatten.hpp"
#include "onnx_import/op/floor.hpp"
#include "onnx_import/op/gather.hpp"
@ -144,6 +143,11 @@
#include "onnx_import/op/xor.hpp"
#include "onnx_import/ops_bridge.hpp"
#include "onnx_import/op/org.openvinotoolkit/detection_output.hpp"
#include "onnx_import/op/org.openvinotoolkit/fake_quantize.hpp"
#include "onnx_import/op/org.openvinotoolkit/normalize.hpp"
#include "onnx_import/op/org.openvinotoolkit/prior_box.hpp"
namespace ngraph
{
namespace onnx_import
@ -249,6 +253,9 @@ namespace ngraph
#define REGISTER_OPERATOR(name_, ver_, fn_) \
m_map[""][name_].emplace(ver_, std::bind(op::set_##ver_::fn_, std::placeholders::_1))
#define REGISTER_OPERATOR_WITH_DOMAIN(domain_, name_, ver_, fn_) \
m_map[domain_][name_].emplace(ver_, std::bind(op::set_##ver_::fn_, std::placeholders::_1))
OperatorsBridge::OperatorsBridge()
{
REGISTER_OPERATOR("Abs", 1, abs);
@ -399,11 +406,16 @@ namespace ngraph
REGISTER_OPERATOR("Where", 1, where);
REGISTER_OPERATOR("Xor", 1, logical_xor);
// TODO Change the domain
m_map[""]["FakeQuantize"].emplace(1, op::set_1::fake_quantize);
// custom operators registered under the OpenVINO domain
REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "FakeQuantize", 1, fake_quantize);
REGISTER_OPERATOR_WITH_DOMAIN(
OPENVINO_ONNX_DOMAIN, "DetectionOutput", 1, detection_output);
REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "PriorBox", 1, prior_box);
REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "Normalize", 1, normalize);
}
#undef REGISTER_OPERATOR
#undef REGISTER_OPERATOR_WITH_DOMAIN
} // namespace onnx_import
} // namespace ngraph
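For clarity, substituting the arguments by hand, a call such as REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "Normalize", 1, normalize) expands to roughly:

    m_map[OPENVINO_ONNX_DOMAIN]["Normalize"].emplace(
        1, std::bind(op::set_1::normalize, std::placeholders::_1));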

View File

@ -111,10 +111,10 @@ namespace ngraph
{"SAME_UPPER", ngraph::op::PadType::SAME_UPPER},
{"SAME_LOWER", ngraph::op::PadType::SAME_LOWER},
{"NOTSET", ngraph::op::PadType::NOTSET},
{"", ngraph::op::PadType::NOTSET},
};
const std::string& pad_str{node.get_attribute_value<std::string>("auto_pad")};
const std::string& pad_str{
node.get_attribute_value<std::string>("auto_pad", "NOTSET")};
const auto pad_val_it = auto_pad_values.find(pad_str);
CHECK_VALID_NODE(node,
pad_val_it != auto_pad_values.end(),

View File

@ -6,10 +6,6 @@ graph {
input: "B"
output: "C"
op_type: "Conv"
attribute {
name: "auto_pad"
type: STRING
}
attribute {
name: "dilations"
ints: 1

View File

@ -7,10 +7,6 @@ graph {
input: "C"
output: "D"
op_type: "Conv"
attribute {
name: "auto_pad"
type: STRING
}
attribute {
name: "dilations"
ints: 2

View File

@ -0,0 +1,140 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "test_graph"
node {
domain: "org.openvinotoolkit"
input: "box_logits"
input: "class_preds"
input: "proposals"
output: "out"
name: "DetectionOutput_500"
op_type: "DetectionOutput"
attribute {
name: "background_label_id"
i: 0
type: INT
}
attribute {
name: "code_type"
s: "CENTER_SIZE"
type: STRING
}
attribute {
name: "confidence_threshold"
f: 0.0099999997764825821
type: FLOAT
}
attribute {
name: "eta"
f: 1
type: FLOAT
}
attribute {
name: "keep_top_k"
i: 5
type: INT
}
attribute {
name: "nms_threshold"
f: 0.44999998807907104
type: FLOAT
}
attribute {
name: "num_classes"
i: 3
type: INT
}
attribute {
name: "share_location"
i: 1
type: INT
}
attribute {
name: "top_k"
i: 5
type: INT
}
attribute {
name: "variance_encoded_in_target"
i: 0
type: INT
}
}
input {
name: "box_logits"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 12
}
}
}
}
}
input {
name: "class_preds"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 9
}
}
}
}
}
input {
name: "proposals"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 15
}
}
}
}
}
output {
name: "out"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 5
}
dim {
dim_value: 7
}
}
}
}
}
}
opset_import {
version: 10
}

View File

@ -0,0 +1,83 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "test"
node {
domain: "org.openvinotoolkit"
input: "data"
input: "weight"
output: "out"
name: "Normalize_177"
op_type: "Normalize"
attribute {
name: "across_spatial"
i: 0
type: INT
}
attribute {
name: "channel_shared"
i: 0
type: INT
}
attribute {
name: "eps"
f: 1.000000013351432e-10
type: FLOAT
}
}
initializer {
dims: 3
data_type: 1
name: "weight"
float_data: 2
float_data: 3
float_data: 4
}
input {
name: "data"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 3
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "out"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 3
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
version: 10
}

View File

@ -0,0 +1,147 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "test"
node {
domain: "org.openvinotoolkit"
input: "A"
input: "B"
output: "out"
name: "PriorBox_306"
op_type: "PriorBox"
attribute {
name: "aspect_ratio"
floats: 1
type: FLOATS
}
attribute {
name: "clip"
i: 0
type: INT
}
attribute {
name: "flip"
i: 1
type: INT
}
attribute {
name: "img_h"
i: 0
type: INT
}
attribute {
name: "img_size"
i: 0
type: INT
}
attribute {
name: "img_w"
i: 0
type: INT
}
attribute {
name: "max_size"
floats: 76.800003051757812
type: FLOATS
}
attribute {
name: "min_size"
floats: 35.840000152587891
type: FLOATS
}
attribute {
name: "offset"
f: 0.5
type: FLOAT
}
attribute {
name: "step"
f: 8
type: FLOAT
}
attribute {
name: "step_h"
f: 0
type: FLOAT
}
attribute {
name: "step_w"
f: 0
type: FLOAT
}
attribute {
name: "variance"
floats: 0.10000000149011612
floats: 0.10000000149011612
floats: 0.20000000298023224
floats: 0.20000000298023224
type: FLOATS
}
}
input {
name: "A"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 3
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "B"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 3
}
dim {
dim_value: 6
}
dim {
dim_value: 6
}
}
}
}
}
output {
name: "out"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 32
}
}
}
}
}
}
opset_import {
version: 10
}

View File

@ -70,6 +70,7 @@ graph {
}
}
node {
domain: "org.openvinotoolkit"
input: "X"
input: "input_low"
input: "input_high"

View File

@ -2,6 +2,7 @@ ir_version: 7
producer_name: "onnx-importer-test"
graph {
node {
domain: "org.openvinotoolkit"
input: "X"
input: "input_low"
input: "input_high"

View File

@ -2470,3 +2470,85 @@ NGRAPH_TEST(${BACKEND_NAME}, quant_dequant_pattern_axis)
test_case.add_input<float>({1});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_detection_output)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/detection_output.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
auto gen_vector = [](size_t size, float min, float max) -> std::vector<float> {
float step = (max - min) / size;
float next = min - step;
std::vector<float> out(size);
std::generate(out.begin(), out.end(), [&next, &step] { return next += step; });
return out;
};
std::vector<float> logits = gen_vector(12, -2, 2);
std::vector<float> class_preds = gen_vector(9, 0, 1);
std::vector<float> proposals = gen_vector(15 * 2, 0, 1);
std::vector<float> output = {0, 1, 0.777778, 0.241012, 0.260378, 0.418248, 0.499622,
0, 1, 0.444444, 0.10963, 0.146239, 0.176296, 0.228576,
0, 2, 0.888889, 0.241012, 0.260378, 0.418248, 0.499622,
0, 2, 0.555556, 0.10963, 0.146239, 0.176296, 0.228576,
0, 2, 0.222222, -0.0378917, -0.00169918, -0.00210832, 0.0387362};
test_case.add_input<float>(logits);
test_case.add_input<float>(class_preds);
test_case.add_input<float>(proposals);
test_case.add_expected_output<float>(Shape{1, 1, 5, 7}, output);
int tolerance_bits = 6;
test_case.run(tolerance_bits);
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_prior_box)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/prior_box.prototxt"));
auto test_case = test::TestCase<TestEngine, test::TestCaseType::DYNAMIC>(function);
std::vector<float> A(3 * 2 * 2);
std::vector<float> B(3 * 6 * 6);
std::vector<float> output = {
-2.3200002, -2.3200002, 3.6533334, 3.6533334, -3.7053659, -3.7053659, 5.0386992,
5.0386992, -0.98666668, -2.3200002, 4.9866667, 3.6533334, -2.3720326, -3.7053659,
6.3720322, 5.0386992, -2.3200002, -0.98666668, 3.6533334, 4.9866667, -3.7053659,
-2.3720326, 5.0386992, 6.3720322, -0.98666668, -0.98666668, 4.9866667, 4.9866667,
-2.3720326, -2.3720326, 6.3720322, 6.3720322, 0.1, 0.1, 0.2,
0.2, 0.1, 0.1, 0.2, 0.2, 0.1, 0.1,
0.2, 0.2, 0.1, 0.1, 0.2, 0.2, 0.1,
0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.2,
0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2,
0.2,
};
test_case.add_input<float>(A);
test_case.add_input<float>(B);
test_case.add_expected_output<float>(Shape{1, 2, 32}, output);
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_normalize)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/normalize.prototxt"));
auto test_case = test::TestCase<TestEngine>(function);
std::vector<float> data(12);
std::iota(data.begin(), data.end(), 1);
std::vector<float> output = {
0.19334731,
0.33806169,
0.44846106,
0.53452247,
1.4501048,
1.5212777,
1.5696137,
1.6035674,
3.4802516,
3.3806169,
3.2887144,
3.2071347,
};
test_case.add_input<float>(data);
test_case.add_expected_output<float>(Shape{1, 3, 2, 2}, output);
test_case.run();
}
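As a hand check of the first expected value: with the input filled by std::iota, the channel values at spatial position (0, 0) are {1, 5, 9}, so the L2 norm across channels is sqrt(1 + 25 + 81) ≈ 10.3441, and channel 0 (weight 2 in the prototxt initializer) gives 1 / 10.3441 * 2 ≈ 0.193347, matching output[0].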

View File

@ -632,6 +632,15 @@ namespace
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_PriorBox()
{
op::PriorBox node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}
void op_is_Product()
{
op::Product node;

View File

@ -176,7 +176,8 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
// get op type
element::Type type;
if (is_type<op::Convert>(op) || is_type<op::Quantize>(op) || is_type<op::Dequantize>(op))
if (is_type<op::Convert>(op) || is_type<op::Quantize>(op) || is_type<op::Dequantize>(op) ||
is_type<op::PriorBox>(op))
{
type = op->get_input_element_type(0);
}

View File

@ -73,6 +73,7 @@
#include "ngraph/runtime/reference/not.hpp"
#include "ngraph/runtime/reference/one_hot.hpp"
#include "ngraph/runtime/reference/pad.hpp"
#include "ngraph/runtime/reference/prior_box.hpp"
#include "ngraph/runtime/reference/product.hpp"
#include "ngraph/runtime/reference/quantize.hpp"
#include "ngraph/runtime/reference/relu.hpp"
@ -881,6 +882,16 @@ protected:
break;
}
case OP_TYPEID::Parameter: break;
case OP_TYPEID::PriorBox:
{
const op::PriorBox* pbox = static_cast<const op::PriorBox*>(&node);
runtime::reference::prior_box<T>(args[0]->get_data_ptr<T>(),
args[1]->get_data_ptr<T>(),
out[0]->get_data_ptr<float>(),
out[0]->get_shape(),
pbox->get_attrs());
break;
}
case OP_TYPEID::Quantize:
{
const op::Quantize* quantize = static_cast<const op::Quantize*>(&node);

View File

@ -111,6 +111,7 @@ NGRAPH_OP(Or, ngraph::op)
NGRAPH_OP(Parameter, ngraph::op)
NGRAPH_OP(Power, ngraph::op)
NGRAPH_OP(PRelu, ngraph::op)
NGRAPH_OP(PriorBox, ngraph::op)
NGRAPH_OP(Product, ngraph::op)
NGRAPH_OP(Quantize, ngraph::op)
NGRAPH_OP(QuantizedConvolution, ngraph::op)