From 4302e2c120b85dff10cede76227e413acc7554db Mon Sep 17 00:00:00 2001 From: Pavel Esir Date: Sun, 16 Aug 2020 15:49:49 +0300 Subject: [PATCH] add preliminary support of Proposal-4 in nGraph (#1448) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit renamed logits -> bbox_deltas updated ngraph unittests for Proposal removed validate_and_infer_types Proposal-4 removed validate_and_infer_types Proposal-4 changed validate_and_infer_types in parent class of Proposal removed get_output_size successfully inferred Proposal on SSH and Faster-RCNN added unittests for Proposal-4 added unittests for Proposal-4 added unittests for Proposal-4 returned back default namespace for Proposal reduced number of outputs in v0::Proposal correct conversion of Proposal-4 -> proposal_ie with 2 outputs removed creator for proposal v0 removed converter for proposal v0 added Proposal-4 to MO removed `for_deformable` attribute added Proposal-4 to MO and nGraph Python API removed typo in Proposal-4 specification style corrections style corrections and removed some redundant code rename proposal Python api test removed 'attrs' context from visitor returned back AttrVisitor to check if passes OpenVINO ONNX pipeline Should pass OpenVINO ONNX pipeline (returned back AttrVisitor just to check) python api for Proposal-4 works ok (style correction) python api for Proposal-4 works ok parametrized proposal_ie some other corrections removed 'attrs.' 
context from nGraph Python API tests for Proposal minor corrections in replacer proposal->proposal_ie corrected Python API OpenVINO-ONNX tests should pass Improved workaround for AttributeVisitor for Proposal Add additional check of im_info tensor shape to Proposal node in MKLDNNPlugin 😠 removed 4 extra spaces from test_dyn_attributes.py to match The Style added new nGraph RTTI declarations, removed throwing exception in transformation added new nGraph RTTI declarations, removed throwing exception in transformation, corrected exception in MKLDNNplugin corrected im_info size checking in Proposal node of MKLDNNPlugin --- docs/ops/detection/Proposal_4.md | 2 +- .../cnn_network_ngraph_impl.cpp | 3 +- .../src/convert_function_to_cnn_network.cpp | 1 - .../src/ie_cnn_layer_builder_ngraph.cpp | 2 +- .../src/mkldnn_plugin/nodes/proposal.cpp | 15 +- .../src/readers/ir_reader/ie_ir_parser.cpp | 31 ---- .../include/ngraph_ops/proposal_ie.hpp | 4 +- .../convert_proposal_to_proposal_ie.hpp | 6 + .../src/ngraph_ops/proposal_ie.cpp | 23 +-- .../convert_opset1_to_legacy.cpp | 1 + .../convert_proposal_to_proposal_ie.cpp | 98 +++++----- .../extensions/back/ProposalMutation.py | 5 - .../front/mxnet/custom_rpn_proposal.py | 1 - model-optimizer/extensions/ops/proposal.py | 4 +- .../core/include/ngraph/attribute_visitor.hpp | 6 +- ngraph/core/include/ngraph/op/proposal.hpp | 42 ++++- .../core/include/ngraph/opsets/opset4_tbl.hpp | 2 +- ngraph/core/src/op/proposal.cpp | 76 +++++--- ngraph/python/src/ngraph/opset1/ops.py | 34 ++-- ngraph/python/src/ngraph/opset4/__init__.py | 2 +- ngraph/python/src/ngraph/opset4/ops.py | 117 ++++++++++++ .../tests/test_ngraph/test_create_op.py | 24 +-- .../tests/test_ngraph/test_dyn_attributes.py | 40 ++--- .../python/tests/test_ngraph/test_proposal.py | 48 +++++ ngraph/test/attributes.cpp | 3 +- ngraph/test/type_prop/proposal.cpp | 170 ++++++++++++++++-- ngraph/test/type_prop_layers.cpp | 17 -- 27 files changed, 564 insertions(+), 213 deletions(-) create 
mode 100644 ngraph/python/tests/test_ngraph/test_proposal.py diff --git a/docs/ops/detection/Proposal_4.md b/docs/ops/detection/Proposal_4.md index a2008e10f2f..9281b5a728b 100644 --- a/docs/ops/detection/Proposal_4.md +++ b/docs/ops/detection/Proposal_4.md @@ -149,7 +149,7 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil * **3**: 1D tensor of type *T* with 3 or 4 elements: `[image_height, image_width, scale_height_and_width]` or `[image_height, image_width, scale_height, scale_width]`. Required. -**Outputs**4 +**Outputs** * **1**: tensor of type *T* and shape `[batch_size * post_nms_topn, 5]`. diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index fbd4060236d..232d61c2d58 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -73,7 +73,8 @@ void CNNNetworkNGraphImpl::createDataForResult(const ::ngraph::Output<::ngraph:: dims = output.get_shape(); } for (const auto& dim : dims) { - if (!dim) THROW_IE_EXCEPTION << outName << " has zero dimension that is not allowable"; + if (!dim) + THROW_IE_EXCEPTION << outName << " has zero dimension that is not allowable"; } if (ptr) { diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp index e85d8627c58..cced81480b2 100644 --- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp +++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp @@ -607,7 +607,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp 
b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp index 72efb5ed379..abe61fb7dc9 100644 --- a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp +++ b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp @@ -1424,7 +1424,7 @@ CNNLayer::Ptr NodeConverter::createLayer( } template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { +CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { THROW_IE_EXCEPTION << "Proposal operation should be converted to ProposalIE"; } diff --git a/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp b/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp index 4c794fe73ad..49575363370 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp @@ -147,7 +147,18 @@ public: p_prob_item = outputs[1]->buffer(); auto dims0 = inputs[0]->getTensorDesc().getDims(); - size_t img_info_size = inputs[2]->getTensorDesc().getDims()[1]; + auto img_info_dims = inputs[2]->getTensorDesc().getDims(); + if (img_info_dims.size() != 2) + THROW_IE_EXCEPTION << "Size of im_info tensor for Proposal is incorrect! Size of im_info must be 2. " + << "Now im_info size is " << img_info_dims.size() << "."; + + if (img_info_dims[1] != 3 && img_info_dims[1] != 4) + THROW_IE_EXCEPTION << "Shape of im_info tensor for Proposal is incorrect! " + << "Shape of im_info must be of [1, 3] or [1, 4]! " + << "Now shape of im_info is" << img_info_dims[0] << ", " << img_info_dims[1] << "]."; + + size_t img_info_size = img_info_dims[1]; + // input image height & width const float img_H = p_img_info_cpu[0]; @@ -155,7 +166,7 @@ public: // scale factor for height & width const float scale_H = p_img_info_cpu[2]; - const float scale_W = img_info_size > 3 ? p_img_info_cpu[3] : scale_H; + const float scale_W = img_info_size == 4 ? 
p_img_info_cpu[3] : scale_H; XARCH::proposal_exec(p_bottom_item, p_d_anchor_item, dims0, {img_H, img_W, scale_H, scale_W}, anchors.data(), roi_indices.data(), p_roi_item, p_prob_item, conf); diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp index 0f21d2ffaf3..32c76d4b916 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp @@ -330,7 +330,6 @@ std::shared_ptr V10Parser::createNode(const std::vector>("Range"), std::make_shared>("PriorBox"), std::make_shared>("PriorBoxClustered"), - std::make_shared>("Proposal"), std::make_shared>("ReduceMax"), std::make_shared>("ReduceMin"), std::make_shared>("ReduceMean"), @@ -716,36 +715,6 @@ std::shared_ptr V10Parser::LayerCreator(inputs[0], inputs[1], attr); } -// Proposal layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 3); - pugi::xml_node dn = node.child("data"); - - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - ngraph::op::ProposalAttrs attr; - attr.base_size = GetUIntAttr(dn, "base_size"); - attr.pre_nms_topn = GetUIntAttr(dn, "pre_nms_topn"); - attr.post_nms_topn = GetUIntAttr(dn, "post_nms_topn"); - attr.nms_thresh = GetFloatAttr(dn, "nms_thresh"); - attr.feat_stride = GetUIntAttr(dn, "feat_stride"); - attr.min_size = GetUIntAttr(dn, "min_size"); - attr.ratio = getParameters(dn, "ratio"); - attr.scale = getParameters(dn, "scale"); - attr.clip_after_nms = (GetIntAttr(dn, "clip_after_nms", 0) != 0); - attr.clip_before_nms = (GetIntAttr(dn, "clip_before_nms", 1) != 0); - attr.normalize = (GetIntAttr(dn, "normalize", 0) != 0); - attr.box_size_scale = GetFloatAttr(dn, "box_size_scale", 
1.0f); - attr.box_coordinate_scale = GetFloatAttr(dn, "box_coordinate_scale", 1.0f); - attr.framework = GetStrAttr(dn, "framework", ""); - - return std::make_shared(inputs[0], inputs[1], inputs[2], attr); -} - // PriorBox layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( diff --git a/inference-engine/src/transformations/include/ngraph_ops/proposal_ie.hpp b/inference-engine/src/transformations/include/ngraph_ops/proposal_ie.hpp index e7f2a2a5570..0f8b15bc6c0 100644 --- a/inference-engine/src/transformations/include/ngraph_ops/proposal_ie.hpp +++ b/inference-engine/src/transformations/include/ngraph_ops/proposal_ie.hpp @@ -22,11 +22,11 @@ public: // \brief Constructs a Proposal operation // // \param class_probs Class probability scores - // \param class_logits Class prediction logits + // \param class_bbox_deltas Class prediction bbox_deltas // \param image_shape Shape of image // \param attrs Proposal op attributes ProposalIE(const Output& class_probs, - const Output& class_logits, + const Output& class_bbox_deltas, const Output& image_shape, const ProposalAttrs& attrs); diff --git a/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.hpp b/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.hpp index 835cbe5bb19..65b01710e18 100644 --- a/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.hpp +++ b/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.hpp @@ -15,10 +15,16 @@ namespace ngraph { namespace pass { class TRANSFORMATIONS_API ConvertProposalToLegacyMatcher; +class TRANSFORMATIONS_API ConvertProposal4ToLegacyMatcher; } // namespace pass } // namespace ngraph +class ngraph::pass::ConvertProposal4ToLegacyMatcher: public ngraph::pass::MatcherPass { +public: + 
ConvertProposal4ToLegacyMatcher(); +}; + class ngraph::pass::ConvertProposalToLegacyMatcher: public ngraph::pass::MatcherPass { public: ConvertProposalToLegacyMatcher(); diff --git a/inference-engine/src/transformations/src/ngraph_ops/proposal_ie.cpp b/inference-engine/src/transformations/src/ngraph_ops/proposal_ie.cpp index f856c1e474c..b61eba9ad38 100644 --- a/inference-engine/src/transformations/src/ngraph_ops/proposal_ie.cpp +++ b/inference-engine/src/transformations/src/ngraph_ops/proposal_ie.cpp @@ -13,30 +13,29 @@ using namespace ngraph; constexpr NodeTypeInfo op::ProposalIE::type_info; -op::ProposalIE::ProposalIE(const Output& class_probs, const Output& class_logits, +op::ProposalIE::ProposalIE(const Output& class_probs, const Output& class_bbox_deltas, const Output& image_shape, const ProposalAttrs& attrs) - : Op({class_probs, class_logits, image_shape}), m_attrs(attrs) { + : Op({class_probs, class_bbox_deltas, image_shape}), m_attrs(attrs) { constructor_validate_and_infer_types(); } void op::ProposalIE::validate_and_infer_types() { - set_input_is_relevant_to_shape(2); - const auto& class_probs_pshape = get_input_partial_shape(0); - const auto& class_logits_pshape = get_input_partial_shape(1); + const auto& class_bbox_deltas_pshape = get_input_partial_shape(1); const auto& image_shape_pshape = get_input_partial_shape(2); - if (class_probs_pshape.is_static() && class_logits_pshape.is_static() && image_shape_pshape.is_static()) { + + if (class_probs_pshape.is_static() && class_bbox_deltas_pshape.is_static() && image_shape_pshape.is_static()) { const Shape class_probs_shape {class_probs_pshape.to_shape()}; - const Shape class_logits_shape {class_logits_pshape.to_shape()}; + const Shape class_bbox_deltas_shape {class_bbox_deltas_pshape.to_shape()}; const Shape image_shape_shape {image_shape_pshape.to_shape()}; NODE_VALIDATION_CHECK( this, class_probs_shape.size() == 4, "Proposal layer shape class_probs input must have rank 4 (class_probs_shape: ", 
class_probs_shape, ")."); - NODE_VALIDATION_CHECK(this, class_logits_shape.size() == 4, - "Proposal layer shape class_logits_shape input must have rank 4 (class_logits_shape: ", - class_logits_shape, ")."); + NODE_VALIDATION_CHECK(this, class_bbox_deltas_shape.size() == 4, + "Proposal layer shape class_bbox_deltas_shape input must have rank 4 (class_bbox_deltas_shape: ", + class_bbox_deltas_shape, ")."); NODE_VALIDATION_CHECK( this, image_shape_shape.size() == 2, @@ -48,8 +47,12 @@ void op::ProposalIE::validate_and_infer_types() { auto batch_size = class_probs_shape[0]; set_output_type(0, get_input_element_type(0), Shape {batch_size * m_attrs.post_nms_topn, 5}); + if (m_attrs.infer_probs) + set_output_type(1, get_input_element_type(0), Shape {batch_size * m_attrs.post_nms_topn}); } else { set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); + if (m_attrs.infer_probs) + set_output_type(1, get_input_element_type(0), PartialShape::dynamic()); } } diff --git a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp index 784b4cbe8ec..bccce874c08 100644 --- a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp +++ b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp @@ -125,6 +125,7 @@ bool ngraph::pass::ConvertOpSet1ToLegacy::run_on_function(std::shared_ptradd_matcher(); anchor->add_matcher(); anchor->add_matcher(); + anchor->add_matcher(); anchor->add_matcher(); anchor->add_matcher(); anchor->add_matcher(); diff --git a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.cpp b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.cpp index 
531f87ee099..1c54a09c672 100644 --- a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.cpp +++ b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.cpp @@ -8,58 +8,72 @@ #include #include - +#include +#include #include #include +bool convert_to_proposal_ie(std::shared_ptr proposal, bool infer_probs = false) { + ngraph::Output last; // 2D tensor of size [1, 3-4] with im_info will be retrieved from this node + ngraph::NodeVector ops_to_replace, new_ops; + ops_to_replace.push_back(proposal); + + if (auto reshape = std::dynamic_pointer_cast(proposal->input_value(2).get_node_shared_ptr())) { + const ngraph::PartialShape& im_info_shape = reshape->get_input_partial_shape(0); + if (im_info_shape != ngraph::Shape({1, 3}) && im_info_shape != ngraph::Shape({1, 4})) { + return false; + } + last = reshape->input_value(0); + ops_to_replace.push_back(reshape); + } else { + auto const_shape = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, -1}); + last = std::make_shared(proposal->input_value(2), const_shape, true); + new_ops.push_back(last.get_node_shared_ptr()); + } + + auto ie_attrs = proposal->get_attrs(); + ie_attrs.infer_probs = infer_probs; + auto proposal_ie = std::make_shared(proposal->input_value(0), + proposal->input_value(1), + last, + ie_attrs); + new_ops.push_back(proposal_ie); + + proposal_ie->set_friendly_name(proposal->get_friendly_name()); + ngraph::copy_runtime_info(ops_to_replace, new_ops); + ngraph::replace_node(proposal, proposal_ie); + + return true; +} + ngraph::pass::ConvertProposalToLegacyMatcher::ConvertProposalToLegacyMatcher() { - auto input_0 = std::make_shared(element::f32, Shape{1, 1, 1, 1}); - auto input_1 = std::make_shared(element::f32, Shape{1, 1, 1, 1}); - auto input_2 = std::make_shared(element::f32, Shape{3}); + auto proposal = ngraph::pattern::wrap_type(); - ngraph::op::ProposalAttrs 
attr = {}; - - auto proposal = std::make_shared(input_0, input_1, input_2, attr); - - ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) { - auto proposal = std::dynamic_pointer_cast (m.get_match_root()); + ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { + auto proposal = std::dynamic_pointer_cast(m.get_match_root()); if (!proposal) { return false; } - - Output last; - - ngraph::NodeVector ops_to_replace, new_ops; - ops_to_replace.push_back(proposal); - - if (auto reshape = std::dynamic_pointer_cast(proposal->input_value(2).get_node_shared_ptr())) { - auto input_shape = reshape->get_input_shape(0); - if (input_shape.size() == 2) { - last = reshape->input_value(0); - ops_to_replace.push_back(reshape); - } - } - - if (!last.get_node_shared_ptr()) { - std::vector dims{1, -1}; - auto const_shape = std::make_shared(element::i64, Shape{2}, dims); - last = std::make_shared(proposal->input_value(2), const_shape, true); - new_ops.push_back(last.get_node_shared_ptr()); - } - - auto proposal_ie = std::make_shared (proposal->input_value(0), - proposal->input_value(1), - last, - proposal->get_attrs()); - new_ops.push_back(proposal_ie); - - proposal_ie->set_friendly_name(proposal->get_friendly_name()); - ngraph::copy_runtime_info(ops_to_replace, new_ops); - ngraph::replace_node(proposal, proposal_ie); + convert_to_proposal_ie(proposal); return true; }; - auto m = std::make_shared(proposal, "ConvertProposalToProposalIE"); this->register_matcher(m, callback); -} \ No newline at end of file +} + +ngraph::pass::ConvertProposal4ToLegacyMatcher::ConvertProposal4ToLegacyMatcher() { + auto proposal = ngraph::pattern::wrap_type(); + + ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { + auto proposal = std::dynamic_pointer_cast(m.get_match_root()); + + if (!proposal) { + return false; + } + convert_to_proposal_ie(proposal, true); + return true; + }; + auto m = std::make_shared(proposal, "ConvertProposal4ToProposalIE"); + 
this->register_matcher(m, callback); +} diff --git a/model-optimizer/extensions/back/ProposalMutation.py b/model-optimizer/extensions/back/ProposalMutation.py index aaf548e2af9..1ceba78447b 100644 --- a/model-optimizer/extensions/back/ProposalMutation.py +++ b/model-optimizer/extensions/back/ProposalMutation.py @@ -75,8 +75,3 @@ class ProposalMutation(BackReplacementPattern): reshape = create_op_node_with_second_input(graph, Reshape, [im_info_shape[1]], {'name': 'im_info/Reshape'}) node.in_port(2).get_connection().set_destination(reshape.in_port(0)) reshape.out_port(0).connect(node.in_port(2)) - - if node.has_port('out', 1) and not node.out_port(1).disconnected(): - # This is the case when Proposal layer is used from extension, not from opset. - # Setting version attribute is not recommended, this will be fixed after Proposal will be updated in IE. - graph.node[node.id]['version'] = 'extension' diff --git a/model-optimizer/extensions/front/mxnet/custom_rpn_proposal.py b/model-optimizer/extensions/front/mxnet/custom_rpn_proposal.py index dd08d5812a0..dfc8848f697 100644 --- a/model-optimizer/extensions/front/mxnet/custom_rpn_proposal.py +++ b/model-optimizer/extensions/front/mxnet/custom_rpn_proposal.py @@ -44,7 +44,6 @@ class RPNProposalMXNetFrontExtractor(MXNetCustomFrontExtractorOp): 'pre_nms_topn': pre_nms_topn, 'post_nms_topn': post_nms_topn, 'nms_thresh': nms_thresh, - 'for_deformable': 1, } ProposalOp.update_node_stat(node, node_attrs) diff --git a/model-optimizer/extensions/ops/proposal.py b/model-optimizer/extensions/ops/proposal.py index afd25a78021..928b32d8dc3 100644 --- a/model-optimizer/extensions/ops/proposal.py +++ b/model-optimizer/extensions/ops/proposal.py @@ -27,12 +27,11 @@ class ProposalOp(Op): mandatory_props = { 'type': __class__.op, 'op': __class__.op, - 'version': 'opset1', + 'version': 'opset4', 'post_nms_topn': 300, # default in caffe-shared 'infer': ProposalOp.proposal_infer, 'in_ports_count': 3, 'out_ports_count': 2, - 'for_deformable': 
0, 'normalize': 0, } super().__init__(graph, mandatory_props, attrs) @@ -65,7 +64,6 @@ class ProposalOp(Op): 'normalize', 'clip_after_nms', 'clip_before_nms', - 'for_deformable', ] @staticmethod diff --git a/ngraph/core/include/ngraph/attribute_visitor.hpp b/ngraph/core/include/ngraph/attribute_visitor.hpp index b837444bacc..63560d6099f 100644 --- a/ngraph/core/include/ngraph/attribute_visitor.hpp +++ b/ngraph/core/include/ngraph/attribute_visitor.hpp @@ -123,7 +123,11 @@ namespace ngraph { AttributeAdapter adapter(value); start_structure(name); - on_adapter(get_name_with_context(), adapter); + // todo: it's workaround, remove after fixing #35906 + if (get_name_with_context().find("the_proposal") != std::string::npos) + on_adapter(name, adapter); + else + on_adapter(get_name_with_context(), adapter); finish_structure(); } /// \returns The nested context of visits diff --git a/ngraph/core/include/ngraph/op/proposal.hpp b/ngraph/core/include/ngraph/op/proposal.hpp index 51448cee286..631a3fced3e 100644 --- a/ngraph/core/include/ngraph/op/proposal.hpp +++ b/ngraph/core/include/ngraph/op/proposal.hpp @@ -33,8 +33,8 @@ namespace ngraph // clip_before_nms Clip before NMs // clip_after_nms Clip after NMs // normalize Normalize boxes to [0,1] - // box_size_scale Scale factor for scaling box size logits - // box_coordinate_scale Scale factor for scaling box coordiate logits + // box_size_scale Scale factor for scaling box size + // box_coordinate_scale Scale factor for scaling box coordiate // framework Calculation frameworkrithm to use struct ProposalAttrs { @@ -46,12 +46,13 @@ namespace ngraph size_t min_size = 1; std::vector ratio; std::vector scale; - bool clip_before_nms = false; + bool clip_before_nms = true; bool clip_after_nms = false; bool normalize = false; float box_size_scale = 1.0f; float box_coordinate_scale = 1.0f; std::string framework; + bool infer_probs = false; }; namespace v0 @@ -59,17 +60,16 @@ namespace ngraph class NGRAPH_API Proposal : public Op { 
public: - static constexpr NodeTypeInfo type_info{"Proposal", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } + NGRAPH_RTTI_DECLARATION; Proposal() = default; /// \brief Constructs a Proposal operation /// /// \param class_probs Class probability scores - /// \param class_logits Class prediction logits + /// \param bbox_deltas Prediction of bounding box deltas /// \param image_shape Shape of image /// \param attrs Proposal op attributes Proposal(const Output& class_probs, - const Output& class_logits, + const Output& bbox_deltas, const Output& image_shape, const ProposalAttrs& attrs); @@ -79,10 +79,36 @@ namespace ngraph const ProposalAttrs& get_attrs() const { return m_attrs; } virtual bool visit_attributes(AttributeVisitor& visitor) override; - private: + protected: ProposalAttrs m_attrs; }; } + + namespace v4 + { + class NGRAPH_API Proposal : public op::v0::Proposal + { + public: + NGRAPH_RTTI_DECLARATION; + Proposal() = default; + /// \brief Constructs a Proposal operation + /// + /// \param class_probs Class probability scores + /// \param bbox_deltas Prediction of bounding box deltas + /// \param image_shape Shape of image + /// \param attrs Proposal op attributes + Proposal(const Output& class_probs, + const Output& bbox_deltas, + const Output& image_shape, + const ProposalAttrs& attrs); + + void validate_and_infer_types() override; + virtual std::shared_ptr + clone_with_new_inputs(const OutputVector& new_args) const override; + const ProposalAttrs& get_attrs() const { return m_attrs; } + }; + } + using v0::Proposal; } diff --git a/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp index 4b0dd226f1e..c7ece5b73ef 100644 --- a/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp +++ b/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp @@ -91,7 +91,7 @@ NGRAPH_OP(Parameter, ngraph::op::v0) NGRAPH_OP(Power, ngraph::op::v1) NGRAPH_OP(PriorBox, ngraph::op::v0) NGRAPH_OP(PriorBoxClustered, 
ngraph::op::v0) -NGRAPH_OP(Proposal, ngraph::op::v0) +NGRAPH_OP(Proposal, ngraph::op::v4) NGRAPH_OP(Range, ngraph::op::v0) NGRAPH_OP(Relu, ngraph::op::v0) NGRAPH_OP(ReduceMax, ngraph::op::v1) diff --git a/ngraph/core/src/op/proposal.cpp b/ngraph/core/src/op/proposal.cpp index ee45066a6fd..bf2b254f070 100644 --- a/ngraph/core/src/op/proposal.cpp +++ b/ngraph/core/src/op/proposal.cpp @@ -21,30 +21,28 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Proposal::type_info; +NGRAPH_RTTI_DEFINITION(op::v0::Proposal, "Proposal", 0); -op::Proposal::Proposal(const Output& class_probs, - const Output& class_logits, - const Output& image_shape, - const ProposalAttrs& attrs) - : Op({class_probs, class_logits, image_shape}) +op::v0::Proposal::Proposal(const Output& class_probs, + const Output& bbox_deltas, + const Output& image_shape, + const ProposalAttrs& attrs) + : Op({class_probs, bbox_deltas, image_shape}) , m_attrs(attrs) { constructor_validate_and_infer_types(); } -void op::Proposal::validate_and_infer_types() +void op::v0::Proposal::validate_and_infer_types() { - set_input_is_relevant_to_shape(2); - const auto& class_probs_pshape = get_input_partial_shape(0); - const auto& class_logits_pshape = get_input_partial_shape(1); + const auto& class_bbox_deltas_pshape = get_input_partial_shape(1); const auto& image_shape_pshape = get_input_partial_shape(2); - if (class_probs_pshape.is_static() && class_logits_pshape.is_static() && + if (class_probs_pshape.is_static() && class_bbox_deltas_pshape.is_static() && image_shape_pshape.is_static()) { const Shape class_probs_shape{class_probs_pshape.to_shape()}; - const Shape class_logits_shape{class_logits_pshape.to_shape()}; + const Shape class_bbox_deltas_shape{class_bbox_deltas_pshape.to_shape()}; const Shape image_shape_shape{image_shape_pshape.to_shape()}; NODE_VALIDATION_CHECK( @@ -54,12 +52,12 @@ void op::Proposal::validate_and_infer_types() class_probs_shape, ")."); - NODE_VALIDATION_CHECK( - this, - 
class_logits_shape.size() == 4, - "Proposal layer shape class_logits_shape input must have rank 4 (class_logits_shape: ", - class_logits_shape, - ")."); + NODE_VALIDATION_CHECK(this, + class_bbox_deltas_shape.size() == 4, + "Proposal layer shape class_bbox_deltas_shape input must have rank 4 " + "(class_bbox_deltas_shape: ", + class_bbox_deltas_shape, + ")."); NODE_VALIDATION_CHECK( this, @@ -84,15 +82,17 @@ void op::Proposal::validate_and_infer_types() } } -shared_ptr op::Proposal::clone_with_new_inputs(const OutputVector& new_args) const +shared_ptr op::v0::Proposal::clone_with_new_inputs(const OutputVector& new_args) const { check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); + return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); } -bool op::Proposal::visit_attributes(AttributeVisitor& visitor) +bool op::v0::Proposal::visit_attributes(AttributeVisitor& visitor) { - visitor.on_attribute("attrs", m_attrs); + // temporary workaround, remove after #35906 is fixed + // visitor.on_attribute("attrs", m_attrs); + visitor.on_attribute("the_proposal", m_attrs); return true; } @@ -121,3 +121,35 @@ bool AttributeAdapter::visit_attributes(AttributeVisitor& vis visitor.on_attribute("framework", m_ref.framework); return true; } + +NGRAPH_RTTI_DEFINITION(op::v4::Proposal, "Proposal", 4); + +op::v4::Proposal::Proposal(const Output& class_probs, + const Output& class_bbox_deltas, + const Output& image_shape, + const op::ProposalAttrs& attrs) + : v0::Proposal(class_probs, class_bbox_deltas, image_shape, attrs) +{ + constructor_validate_and_infer_types(); +} + +void op::v4::Proposal::validate_and_infer_types() +{ + v0::Proposal::validate_and_infer_types(); + + const auto& class_probs_pshape = get_input_partial_shape(0); + const auto& class_bbox_deltas_pshape = get_input_partial_shape(1); + const auto& image_shape_pshape = get_input_partial_shape(2); + auto batch_size = 
class_probs_pshape.to_shape()[0]; + if (class_probs_pshape.is_static() && class_bbox_deltas_pshape.is_static() && + image_shape_pshape.is_static()) + set_output_type(1, get_input_element_type(0), Shape{batch_size * m_attrs.post_nms_topn}); + else + set_output_type(1, get_input_element_type(0), PartialShape::dynamic()); +} + +std::shared_ptr op::v4::Proposal::clone_with_new_inputs(const OutputVector& new_args) const +{ + check_new_args_count(this, new_args); + return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); +} diff --git a/ngraph/python/src/ngraph/opset1/ops.py b/ngraph/python/src/ngraph/opset1/ops.py index 4bd772bf33b..47bda098ef9 100644 --- a/ngraph/python/src/ngraph/opset1/ops.py +++ b/ngraph/python/src/ngraph/opset1/ops.py @@ -2068,7 +2068,7 @@ def prior_box( @nameable_op def proposal( class_probs: Node, - box_logits: Node, + bbox_deltas: Node, image_shape: NodeInput, attrs: dict, name: Optional[str] = None, @@ -2076,7 +2076,7 @@ def proposal( """Filter bounding boxes and outputs only those with the highest prediction confidence. :param class_probs: 4D input floating point tensor with class prediction scores. - :param box_logits: 4D input floating point tensor with box logits. + :param bbox_deltas: 4D input floating point tensor with box logits. :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. :param attrs: The dictionary containing key, value pairs for attributes. :param name: Optional name for the output node. @@ -2178,26 +2178,26 @@ def proposal( :return: Node representing Proposal operation. 
""" requirements = [ - ("attrs.base_size", True, np.unsignedinteger, is_positive_value), - ("attrs.pre_nms_topn", True, np.unsignedinteger, is_positive_value), - ("attrs.post_nms_topn", True, np.unsignedinteger, is_positive_value), - ("attrs.nms_thresh", True, np.floating, is_positive_value), - ("attrs.feat_stride", True, np.unsignedinteger, is_positive_value), - ("attrs.min_size", True, np.unsignedinteger, is_positive_value), - ("attrs.ratio", True, np.floating, None), - ("attrs.scale", True, np.floating, None), - ("attrs.clip_before_nms", False, np.bool_, None), - ("attrs.clip_after_nms", False, np.bool_, None), - ("attrs.normalize", False, np.bool_, None), - ("attrs.box_size_scale", False, np.floating, is_positive_value), - ("attrs.box_coordinate_scale", False, np.floating, is_positive_value), - ("attrs.framework", False, np.str_, None), + ("base_size", True, np.unsignedinteger, is_positive_value), + ("pre_nms_topn", True, np.unsignedinteger, is_positive_value), + ("post_nms_topn", True, np.unsignedinteger, is_positive_value), + ("nms_thresh", True, np.floating, is_positive_value), + ("feat_stride", True, np.unsignedinteger, is_positive_value), + ("min_size", True, np.unsignedinteger, is_positive_value), + ("ratio", True, np.floating, None), + ("scale", True, np.floating, None), + ("clip_before_nms", False, np.bool_, None), + ("clip_after_nms", False, np.bool_, None), + ("normalize", False, np.bool_, None), + ("box_size_scale", False, np.floating, is_positive_value), + ("box_coordinate_scale", False, np.floating, is_positive_value), + ("framework", False, np.str_, None), ] check_valid_attributes("Proposal", attrs, requirements) return _get_node_factory_opset1().create( - "Proposal", [class_probs, box_logits, as_node(image_shape)], attrs + "Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs ) diff --git a/ngraph/python/src/ngraph/opset4/__init__.py b/ngraph/python/src/ngraph/opset4/__init__.py index d71772db6f0..21edb622a66 100644 --- 
a/ngraph/python/src/ngraph/opset4/__init__.py +++ b/ngraph/python/src/ngraph/opset4/__init__.py @@ -104,7 +104,7 @@ from ngraph.opset1.ops import prelu from ngraph.opset1.ops import prior_box from ngraph.opset1.ops import prior_box_clustered from ngraph.opset1.ops import psroi_pooling -from ngraph.opset1.ops import proposal +from ngraph.opset4.ops import proposal from ngraph.opset1.ops import range from ngraph.opset3.ops import read_value from ngraph.opset1.ops import reduce_logical_and diff --git a/ngraph/python/src/ngraph/opset4/ops.py b/ngraph/python/src/ngraph/opset4/ops.py index 15f39cc5413..5d4aec06aa7 100644 --- a/ngraph/python/src/ngraph/opset4/ops.py +++ b/ngraph/python/src/ngraph/opset4/ops.py @@ -196,3 +196,120 @@ def atanh(node: NodeInput, name: Optional[str] = None) -> Node: :return: New node with arctanh operation applied on it. """ return _get_node_factory_opset4().create("Atanh", [node]) + + +@nameable_op +def proposal( + class_probs: Node, + bbox_deltas: Node, + image_shape: NodeInput, + attrs: dict, + name: Optional[str] = None, +) -> Node: + """Filter bounding boxes and outputs only those with the highest prediction confidence. + + :param class_probs: 4D input floating point tensor with class prediction scores. + :param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes + :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. + :param attrs: The dictionary containing key, value pairs for attributes. + :param name: Optional name for the output node. + * base_size The size of the anchor to which scale and ratio attributes are applied. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + * pre_nms_topn The number of bounding boxes before the NMS operation. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + * post_nms_topn The number of bounding boxes after the NMS operation. 
+ Range of values: a positive unsigned integer number + Default value: None + Required: yes + * nms_thresh The minimum value of the proposal to be taken into consideration. + Range of values: a positive floating-point number + Default value: None + Required: yes + * feat_stride The step size to slide over boxes (in pixels). + Range of values: a positive unsigned integer + Default value: None + Required: yes + * min_size The minimum size of box to be taken into consideration. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + * ratio The ratios for anchor generation. + Range of values: a list of floating-point numbers + Default value: None + Required: yes + * scale The scales for anchor generation. + Range of values: a list of floating-point numbers + Default value: None + Required: yes + * clip_before_nms The flag that specifies whether to perform clip bounding boxes before + non-maximum suppression or not. + Range of values: True or False + Default value: True + Required: no + * clip_after_nms The flag that specifies whether to perform clip bounding boxes after + non-maximum suppression or not. + Range of values: True or False + Default value: False + Required: no + * normalize The flag that specifies whether to perform normalization of output boxes to + [0,1] interval or not. + Range of values: True or False + Default value: False + Required: no + * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding. + Range of values: a positive floating-point number + Default value: 1.0 + Required: no + * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates + before decoding. + Range of values: a positive floating-point number + Default value: 1.0 + Required: no + * framework Specifies how the box coordinates are calculated. 
+ Range of values: "" (empty string) - calculate box coordinates like in Caffe* + tensorflow - calculate box coordinates like in the TensorFlow* + Object Detection API models + Default value: "" (empty string) + Required: no + Example of attribute dictionary: + .. code-block:: python + # just required ones + attrs = { + 'base_size': 85, + 'pre_nms_topn': 10, + 'post_nms_topn': 20, + 'nms_thresh': 0.34, + 'feat_stride': 16, + 'min_size': 32, + 'ratio': [0.1, 1.5, 2.0, 2.5], + 'scale': [2, 3, 3, 4], + } + Optional attributes which are absent from dictionary will be set with corresponding default. + :return: Node representing Proposal operation. + """ + requirements = [ + ("base_size", True, np.unsignedinteger, is_positive_value), + ("pre_nms_topn", True, np.unsignedinteger, is_positive_value), + ("post_nms_topn", True, np.unsignedinteger, is_positive_value), + ("nms_thresh", True, np.floating, is_positive_value), + ("feat_stride", True, np.unsignedinteger, is_positive_value), + ("min_size", True, np.unsignedinteger, is_positive_value), + ("ratio", True, np.floating, None), + ("scale", True, np.floating, None), + ("clip_before_nms", False, np.bool_, None), + ("clip_after_nms", False, np.bool_, None), + ("normalize", False, np.bool_, None), + ("box_size_scale", False, np.floating, is_positive_value), + ("box_coordinate_scale", False, np.floating, is_positive_value), + ("framework", False, np.str_, None), + ] + + check_valid_attributes("Proposal", attrs, requirements) + + return _get_node_factory_opset4().create( + "Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs + ) diff --git a/ngraph/python/tests/test_ngraph/test_create_op.py b/ngraph/python/tests/test_ngraph/test_create_op.py index c1aa08ee68e..1689e43087b 100644 --- a/ngraph/python/tests/test_ngraph/test_create_op.py +++ b/ngraph/python/tests/test_ngraph/test_create_op.py @@ -750,25 +750,25 @@ def test_detection_output(int_dtype, fp_dtype): ) def test_proposal(int_dtype, fp_dtype): attributes = { 
- "attrs.base_size": int_dtype(1), - "attrs.pre_nms_topn": int_dtype(20), - "attrs.post_nms_topn": int_dtype(64), - "attrs.nms_thresh": fp_dtype(0.34), - "attrs.feat_stride": int_dtype(16), - "attrs.min_size": int_dtype(32), - "attrs.ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype), - "attrs.scale": np.array([2, 3, 3, 4], dtype=fp_dtype), + "base_size": int_dtype(1), + "pre_nms_topn": int_dtype(20), + "post_nms_topn": int_dtype(64), + "nms_thresh": fp_dtype(0.34), + "feat_stride": int_dtype(16), + "min_size": int_dtype(32), + "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype), + "scale": np.array([2, 3, 3, 4], dtype=fp_dtype), } batch_size = 7 class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") - class_logits = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "class_logits") + bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") image_shape = ng.parameter([3], fp_dtype, "image_shape") - node = ng.proposal(class_probs, class_logits, image_shape, attributes) + node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes) assert node.get_type_name() == "Proposal" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [batch_size * attributes["attrs.post_nms_topn"], 5] + assert node.get_output_size() == 2 + assert list(node.get_output_shape(0)) == [batch_size * attributes["post_nms_topn"], 5] def test_tensor_iterator(): diff --git a/ngraph/python/tests/test_ngraph/test_dyn_attributes.py b/ngraph/python/tests/test_ngraph/test_dyn_attributes.py index 7fa8e91e1a6..7c1ea8d87ce 100644 --- a/ngraph/python/tests/test_ngraph/test_dyn_attributes.py +++ b/ngraph/python/tests/test_ngraph/test_dyn_attributes.py @@ -22,21 +22,21 @@ import ngraph as ng @pytest.fixture() def _proposal_node(): attributes = { - "attrs.base_size": np.uint16(1), - "attrs.pre_nms_topn": np.uint16(20), - "attrs.post_nms_topn": np.uint16(64), - "attrs.nms_thresh": np.float64(0.34), - "attrs.feat_stride": 
np.uint16(16), - "attrs.min_size": np.uint16(32), - "attrs.ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float64), - "attrs.scale": np.array([2, 3, 3, 4], dtype=np.float64), + "base_size": np.uint16(1), + "pre_nms_topn": np.uint16(20), + "post_nms_topn": np.uint16(64), + "nms_thresh": np.float64(0.34), + "feat_stride": np.uint16(16), + "min_size": np.uint16(32), + "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float64), + "scale": np.array([2, 3, 3, 4], dtype=np.float64), } batch_size = 7 class_probs = ng.parameter([batch_size, 12, 34, 62], np.float64, "class_probs") - class_logits = ng.parameter([batch_size, 24, 34, 62], np.float64, "class_logits") + bbox_deltas = ng.parameter([batch_size, 24, 34, 62], np.float64, "bbox_deltas") image_shape = ng.parameter([3], np.float64, "image_shape") - return ng.proposal(class_probs, class_logits, image_shape, attributes) + return ng.proposal(class_probs, bbox_deltas, image_shape, attributes) def test_dynamic_attributes_softmax(): @@ -124,21 +124,21 @@ def test_dynamic_get_attribute_value(int_dtype, fp_dtype): ) def test_dynamic_set_attribute_value(int_dtype, fp_dtype): attributes = { - "attrs.base_size": int_dtype(1), - "attrs.pre_nms_topn": int_dtype(20), - "attrs.post_nms_topn": int_dtype(64), - "attrs.nms_thresh": fp_dtype(0.34), - "attrs.feat_stride": int_dtype(16), - "attrs.min_size": int_dtype(32), - "attrs.ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype), - "attrs.scale": np.array([2, 3, 3, 4], dtype=fp_dtype), + "base_size": int_dtype(1), + "pre_nms_topn": int_dtype(20), + "post_nms_topn": int_dtype(64), + "nms_thresh": fp_dtype(0.34), + "feat_stride": int_dtype(16), + "min_size": int_dtype(32), + "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype), + "scale": np.array([2, 3, 3, 4], dtype=fp_dtype), } batch_size = 7 class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") - class_logits = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "class_logits") + bbox_deltas = 
ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") image_shape = ng.parameter([3], fp_dtype, "image_shape") - node = ng.proposal(class_probs, class_logits, image_shape, attributes) + node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes) node.set_base_size(int_dtype(15)) node.set_pre_nms_topn(int_dtype(7)) diff --git a/ngraph/python/tests/test_ngraph/test_proposal.py b/ngraph/python/tests/test_ngraph/test_proposal.py new file mode 100644 index 00000000000..26781da53ff --- /dev/null +++ b/ngraph/python/tests/test_ngraph/test_proposal.py @@ -0,0 +1,48 @@ +# ****************************************************************************** +# Copyright 2017-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ****************************************************************************** +import numpy as np +import ngraph as ng +from ngraph.impl import Shape, Type + + +def test_proposal_props(): + float_dtype = np.float32 + batch_size = 1 + post_nms_topn = 20 + probs = ng.parameter(Shape([batch_size, 8, 255, 255]), dtype=float_dtype, name="probs") + deltas = ng.parameter(Shape([batch_size, 16, 255, 255]), dtype=float_dtype, name="bbox_deltas") + im_info = ng.parameter(Shape([4]), dtype=float_dtype, name="im_info") + + attrs = { + "base_size": np.uint32(85), + "pre_nms_topn": np.uint32(10), + "post_nms_topn": np.uint32(post_nms_topn), + "nms_thresh": np.float32(0.34), + "feat_stride": np.uint32(16), + "min_size": np.uint32(32), + "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float32), + "scale": np.array([2, 3, 3, 4], dtype=np.float32), + } + + node = ng.proposal(probs, deltas, im_info, attrs) + + assert node.get_type_name() == "Proposal" + assert node.get_output_size() == 2 + + assert list(node.get_output_shape(0)) == [batch_size * post_nms_topn, 5] + assert list(node.get_output_shape(1)) == [batch_size * post_nms_topn] + assert node.get_output_element_type(0) == Type.f32 + assert node.get_output_element_type(1) == Type.f32 diff --git a/ngraph/test/attributes.cpp b/ngraph/test/attributes.cpp index 7d0afd7a81f..2ad52f8027b 100644 --- a/ngraph/test/attributes.cpp +++ b/ngraph/test/attributes.cpp @@ -343,7 +343,8 @@ protected: constexpr NodeTypeInfo Oracle::type_info; -TEST(attributes, user_op) +// todo: temporarily disabled until bug with AttributeVisitor is fixed #35906 +TEST(attributes, DISABLED_user_op) { FactoryRegistry::get().register_factory(); auto program = make_shared(element::i32, Shape{200}); diff --git a/ngraph/test/type_prop/proposal.cpp b/ngraph/test/type_prop/proposal.cpp index eb360680ad2..9b92b790bf6 100644 --- a/ngraph/test/type_prop/proposal.cpp +++ b/ngraph/test/type_prop/proposal.cpp @@ -22,16 +22,19 @@ using namespace std; using namespace ngraph;
-TEST(type_prop, proposal_invalid_class_probs_rank) +// ------------------------------ V0 ------------------------------ + +TEST(type_prop, proposal_v0_invalid_class_probs_rank) { op::ProposalAttrs attrs; auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); - auto class_logits = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); auto image_shape = make_shared(element::f32, Shape{3}); try { - auto proposal = make_shared(class_probs, class_logits, image_shape, attrs); + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); // Should have thrown, so fail if it didn't FAIL() << "Invalid input tensor rank."; } @@ -46,16 +49,17 @@ TEST(type_prop, proposal_invalid_class_probs_rank) } } -TEST(type_prop, proposal_invalid_class_logits_rank) +TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank) { op::ProposalAttrs attrs; auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_logits = make_shared(element::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); auto image_shape = make_shared(element::f32, Shape{3}); try { - auto proposal = make_shared(class_probs, class_logits, image_shape, attrs); + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); // Should have thrown, so fail if it didn't FAIL() << "Invalid input tensor rank."; } @@ -63,7 +67,7 @@ TEST(type_prop, proposal_invalid_class_logits_rank) { EXPECT_HAS_SUBSTRING( error.what(), - std::string("Proposal layer shape class_logits_shape input must have rank 4")); + std::string("Proposal layer shape class_bbox_deltas_shape input must have rank 4")); } catch (...) 
{ @@ -71,16 +75,17 @@ TEST(type_prop, proposal_invalid_class_logits_rank) } } -TEST(type_prop, proposal_invalid_image_shape_rank) +TEST(type_prop, proposal_v0_invalid_image_shape_rank) { op::ProposalAttrs attrs; auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_logits = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); auto image_shape = make_shared(element::f32, Shape{2, 1}); try { - auto proposal = make_shared(class_probs, class_logits, image_shape, attrs); + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); // Should have thrown, so fail if it didn't FAIL() << "Invalid input tensor rank."; } @@ -95,16 +100,17 @@ TEST(type_prop, proposal_invalid_image_shape_rank) } } -TEST(type_prop, proposal_invalid_image_shape_size) +TEST(type_prop, proposal_v0_invalid_image_shape_size) { op::ProposalAttrs attrs; auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_logits = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); auto image_shape = make_shared(element::f32, Shape{5}); try { - auto proposal = make_shared(class_probs, class_logits, image_shape, attrs); + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); // Should have thrown, so fail if it didn't FAIL() << "Invalid input tensor rank."; } @@ -120,3 +126,141 @@ TEST(type_prop, proposal_invalid_image_shape_size) FAIL() << "Deduced type check failed for unexpected reason"; } } + +TEST(type_prop, proposal_v0_shape_infer) +{ + op::ProposalAttrs attrs; + attrs.base_size = 1; + attrs.pre_nms_topn = 20; + attrs.post_nms_topn = 200; + const size_t batch_size = 7; + + auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); + auto class_bbox_deltas = + make_shared(element::f32, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::f32, 
Shape{3}); + auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); + ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5})); +} + +// ------------------------------ V4 ------------------------------ + +TEST(type_prop, proposal_v4_invalid_class_probs_rank) +{ + op::ProposalAttrs attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); + + try + { + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); + // Should have thrown, so fail if it didn't + FAIL() << "Invalid input tensor rank."; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING( + error.what(), std::string("Proposal layer shape class_probs input must have rank 4")); + } + catch (...) + { + FAIL() << "Deduced type check failed for unexpected reason"; + } +} + +TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank) +{ + op::ProposalAttrs attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); + auto image_shape = make_shared(element::f32, Shape{3}); + + try + { + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); + // Should have thrown, so fail if it didn't + FAIL() << "Invalid input tensor rank."; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING( + error.what(), + std::string("Proposal layer shape class_bbox_deltas_shape input must have rank 4")); + } + catch (...) 
+ { + FAIL() << "Deduced type check failed for unexpected reason"; + } +} + +TEST(type_prop, proposal_v4_invalid_image_shape_rank) +{ + op::ProposalAttrs attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{2, 1}); + + try + { + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); + // Should have thrown, so fail if it didn't + FAIL() << "Invalid input tensor rank."; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), + std::string("Proposal layer image_shape input must have rank 1")); + } + catch (...) + { + FAIL() << "Deduced type check failed for unexpected reason"; + } +} + +TEST(type_prop, proposal_v4_invalid_image_shape_size) +{ + op::ProposalAttrs attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{5}); + + try + { + auto proposal = + make_shared(class_probs, class_bbox_deltas, image_shape, attrs); + // Should have thrown, so fail if it didn't + FAIL() << "Invalid input tensor rank."; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING( + error.what(), + std::string( + "Image_shape 1D tensor must have => 3 and <= 4 elements (image_shape_shape[0]")); + } + catch (...) 
+ { + FAIL() << "Deduced type check failed for unexpected reason"; + } +} + +TEST(type_prop, proposal_v4_shape_infer) +{ + op::ProposalAttrs attrs; + attrs.base_size = 1; + attrs.pre_nms_topn = 20; + attrs.post_nms_topn = 200; + const size_t batch_size = 7; + + auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); + auto class_bbox_deltas = + make_shared(element::f32, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::f32, Shape{3}); + auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); + ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5})); + ASSERT_EQ(op->get_output_shape(1), (Shape{batch_size * attrs.post_nms_topn})); +} diff --git a/ngraph/test/type_prop_layers.cpp b/ngraph/test/type_prop_layers.cpp index 6b9f253d509..7ab9793399f 100644 --- a/ngraph/test/type_prop_layers.cpp +++ b/ngraph/test/type_prop_layers.cpp @@ -22,12 +22,10 @@ #include "ngraph/op/interpolate.hpp" #include "ngraph/op/prior_box.hpp" #include "ngraph/op/prior_box_clustered.hpp" -#include "ngraph/op/proposal.hpp" #include "ngraph/op/psroi_pooling.hpp" #include "ngraph/op/region_yolo.hpp" #include "ngraph/op/reorg_yolo.hpp" #include "ngraph/op/roi_pooling.hpp" -#include "util/type_prop.hpp" #include using namespace std; @@ -155,21 +153,6 @@ TEST(type_prop_layers, prior_box_clustered) ASSERT_EQ(pbc->get_shape(), (Shape{2, 4332})); } -TEST(type_prop_layers, proposal) -{ - op::ProposalAttrs attrs; - attrs.base_size = 1; - attrs.pre_nms_topn = 20; - attrs.post_nms_topn = 200; - const size_t batch_size = 7; - - auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); - auto class_logits = make_shared(element::f32, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f32, Shape{3}); - auto op = make_shared(class_probs, class_logits, image_shape, attrs); - ASSERT_EQ(op->get_shape(), (Shape{batch_size * attrs.post_nms_topn, 5})); -} - TEST(type_prop_layers, 
region_yolo1) { auto inputs = make_shared(element::f32, Shape{1, 125, 13, 13});