[CPU] PriorBox & PriorBoxClustered dynamism enabling (#8597)

Edward Shogulin 2021-12-30 17:43:16 +03:00 committed by GitHub
parent 8ba94cfb8f
commit ec5198094a
13 changed files with 1073 additions and 10 deletions


@ -68,7 +68,7 @@ void ov::op::v0::PriorBoxClustered::validate_and_infer_types() {
const auto num_priors = m_attrs.widths.size();
set_output_type(0, element::f32, ov::Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors});
} else {
set_output_type(0, element::f32, ov::PartialShape::dynamic());
set_output_type(0, element::f32, ov::PartialShape{2, Dimension::dynamic()});
}
}
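
The effect of the tighter shape inference above can be shown with a minimal, hypothetical sketch (the node variable is an assumption, not part of the diff): a PriorBoxClustered whose size inputs are not constant now reports a rank-2 output with a fixed first dimension instead of a fully dynamic shape.

// Hypothetical sketch: prior_box_clustered is assumed to be a node built with
// non-constant size inputs.
const auto out = prior_box_clustered->get_output_partial_shape(0);
// previously: PartialShape::dynamic() (unknown rank)
// now:        rank 2, out[0] == 2, out[1] stays dynamic until H, W and the
//             number of priors are known (4 * H * W * num_priors)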


@ -185,7 +185,9 @@ const InferenceEngine::details::caseless_unordered_map<std::string, Type> type_t
{ "MatrixNms", MatrixNms},
{ "MulticlassNms", MulticlassNms},
{ "Reference", Reference},
{ "Subgraph", Subgraph}
{ "Subgraph", Subgraph},
{ "PriorBox", PriorBox},
{ "PriorBoxClustered", PriorBoxClustered},
};
Type TypeFromName(const std::string& type) {


@ -101,7 +101,9 @@ enum Type {
NonMaxSuppression,
MatrixNms,
MulticlassNms,
Subgraph
Subgraph,
PriorBox,
PriorBoxClustered,
};
enum Algorithm {


@ -83,6 +83,8 @@
#include "nodes/mkldnn_non_zero.h"
#include "nodes/mkldnn_color_convert_node.h"
#include "nodes/subgraph.h"
#include "nodes/mkldnn_priorbox_node.h"
#include "nodes/mkldnn_priorbox_clustered_node.h"
#define MKLDNN_NODE(__prim, __type) \
registerNodeIfRequired(MKLDNNPlugin, __prim, __type, MKLDNNNodeImpl<__prim>)
@ -174,4 +176,6 @@ MKLDNNPlugin::MKLDNNNode::NodesFactory::NodesFactory()
MKLDNN_NODE(MKLDNNNonZeroNode, NonZero);
MKLDNN_NODE(MKLDNNSnippetNode, Subgraph);
MKLDNN_NODE(MKLDNNColorConvertNode, ColorConvert);
MKLDNN_NODE(MKLDNNPriorBoxNode, PriorBox);
MKLDNN_NODE(MKLDNNPriorBoxClusteredNode, PriorBoxClustered);
}


@ -0,0 +1,170 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "mkldnn_priorbox_clustered_node.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>
#include <ie_parallel.hpp>
#include <mkldnn_types.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>
using namespace MKLDNNPlugin;
using namespace InferenceEngine;
bool MKLDNNPriorBoxClusteredNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
const auto priorBox = std::dynamic_pointer_cast<const ngraph::opset1::PriorBoxClustered>(op);
if (!priorBox) {
errorMessage = "Only opset1 PriorBoxClustered operation is supported";
return false;
}
} catch (...) {
return false;
}
return true;
}
MKLDNNPriorBoxClusteredNode::MKLDNNPriorBoxClusteredNode(
const std::shared_ptr<ngraph::Node>& op,
const mkldnn::engine& eng,
MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(op, eng, cache) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}
const auto priorBox = std::dynamic_pointer_cast<const ngraph::opset1::PriorBoxClustered>(op);
const ngraph::opset1::PriorBoxClustered::Attributes& attrs = priorBox->get_attrs();
widths = attrs.widths;
heights = attrs.heights;
clip = attrs.clip;
variances = attrs.variances;
step = attrs.step;
step_heights = attrs.step_heights;
step_widths = attrs.step_widths;
offset = attrs.offset;
number_of_priors = widths.size();
if (variances.empty()) {
variances.push_back(0.1f);
}
}
bool MKLDNNPriorBoxClusteredNode::needShapeInfer() const {
auto& memory = getChildEdgeAt(0)->getMemoryPtr();
if (memory->GetShape().isDynamic()) {
return true;
}
const auto& outputShape = memory->GetShape().getStaticDims();
const int* in_data = reinterpret_cast<int*>(memory->GetPtr());
const int h = in_data[0];
const int w = in_data[1];
const auto output = static_cast<size_t>(4 * h * w * number_of_priors);
return outputShape[1] != output;
}
std::vector<VectorDims> MKLDNNPriorBoxClusteredNode::shapeInfer() const {
const int* in_data = reinterpret_cast<int*>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
const int H = in_data[0];
const int W = in_data[1];
const auto output = static_cast<size_t>(4 * H * W * number_of_priors);
return {{2, output}};
}
bool MKLDNNPriorBoxClusteredNode::needPrepareParams() const {
return false;
}
void MKLDNNPriorBoxClusteredNode::initSupportedPrimitiveDescriptors() {
if (!supportedPrimitiveDescriptors.empty())
return;
addSupportedPrimDesc(
{{LayoutType::ncsp, Precision::I32}, {LayoutType::ncsp, Precision::I32}},
{{LayoutType::ncsp, Precision::FP32}},
impl_desc_type::ref_any);
}
void MKLDNNPriorBoxClusteredNode::createPrimitive() {
if (inputShapesDefined()) {
if (needPrepareParams())
prepareParams();
updateLastInputDims();
}
}
void MKLDNNPriorBoxClusteredNode::execute(mkldnn::stream strm) {
const int* in_data = reinterpret_cast<int*>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
const int layer_height = in_data[0];
const int layer_width = in_data[1];
const int* in_image = reinterpret_cast<int*>(getParentEdgeAt(1)->getMemoryPtr()->GetPtr());
int img_height = in_image[0];
int img_width = in_image[1];
float step_w = step_widths == 0 ? step : step_widths;
float step_h = step_heights == 0 ? step : step_heights;
if (step_w == 0 && step_h == 0) {
step_w = static_cast<float>(img_width) / layer_width;
step_h = static_cast<float>(img_height) / layer_height;
}
float* dst_data = reinterpret_cast<float*>(getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
const auto& out_shape = getChildEdgeAt(0)->getMemory().GetShape().getStaticDims();
size_t var_size = variances.size();
parallel_for2d(layer_height, layer_width, [&](int64_t h, int64_t w) {
float center_x = (w + offset) * step_w;
float center_y = (h + offset) * step_h;
for (size_t s = 0; s < number_of_priors; ++s) {
float box_width = widths[s];
float box_height = heights[s];
float xmin = (center_x - box_width / 2.0f) / img_width;
float ymin = (center_y - box_height / 2.0f) / img_height;
float xmax = (center_x + box_width / 2.0f) / img_width;
float ymax = (center_y + box_height / 2.0f) / img_height;
if (clip) {
xmin = (std::min)((std::max)(xmin, 0.0f), 1.0f);
ymin = (std::min)((std::max)(ymin, 0.0f), 1.0f);
xmax = (std::min)((std::max)(xmax, 0.0f), 1.0f);
ymax = (std::min)((std::max)(ymax, 0.0f), 1.0f);
}
const uint64_t idx = h * layer_width * number_of_priors * 4 + w * number_of_priors * 4 + s * 4;
dst_data[idx + 0] = xmin;
dst_data[idx + 1] = ymin;
dst_data[idx + 2] = xmax;
dst_data[idx + 3] = ymax;
// At this point we have either:
// 1. A single variance value (to be repeated 4 times for each prior)
// 2. 4 variance values
if (var_size == 1) {
for (size_t j = 0; j < 4; j++)
dst_data[idx + j + out_shape[1]] = variances[0];
} else {
for (size_t j = 0; j < var_size; j++)
dst_data[idx + j + out_shape[1]] = variances[j];
}
}
});
}
bool MKLDNNPriorBoxClusteredNode::created() const {
return getType() == PriorBoxClustered;
}
REG_MKLDNN_PRIM_FOR(MKLDNNPriorBoxClusteredNode, PriorBoxClustered)
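
A short worked example of the arithmetic used by needShapeInfer() and shapeInfer() above; the helper below is illustrative only and not part of the node:

#include <cstddef>
// For a 4x4 feature map with 3 clustered priors (widths.size() == 3) the second
// output dimension is 4 * 4 * 4 * 3 = 192, so shapeInfer() returns {{2, 192}} and
// needShapeInfer() only fires when the cached output no longer matches this value.
static std::size_t priorBoxClusteredOutDim(int h, int w, std::size_t numberOfPriors) {
    return static_cast<std::size_t>(4) * h * w * numberOfPriors;
}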


@ -0,0 +1,46 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <vector>
#include <ie_common.h>
#include <mkldnn_node.h>
namespace MKLDNNPlugin {
class MKLDNNPriorBoxClusteredNode : public MKLDNNNode {
public:
MKLDNNPriorBoxClusteredNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void createPrimitive() override;
void execute(mkldnn::stream strm) override;
bool created() const override;
bool needShapeInfer() const override;
std::vector<VectorDims> shapeInfer() const override;
bool needPrepareParams() const override;
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
private:
std::vector<float> widths;
std::vector<float> heights;
std::vector<float> variances;
bool clip;
float step;
float step_heights;
float step_widths;
float offset;
int number_of_priors;
};
} // namespace MKLDNNPlugin


@ -0,0 +1,324 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "mkldnn_priorbox_node.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>
#include <ie_parallel.hpp>
#include <mkldnn_types.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>
using namespace MKLDNNPlugin;
using namespace InferenceEngine;
#define THROW_ERROR IE_THROW() << "PriorBox layer with name '" << getName() << "': "
namespace {
float clip_great(float x, float threshold) {
return x < threshold ? x : threshold;
}
float clip_less(float x, float threshold) {
return x > threshold ? x : threshold;
}
}
bool MKLDNNPriorBoxNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
const auto priorBox = std::dynamic_pointer_cast<const ngraph::opset1::PriorBox>(op);
if (!priorBox) {
errorMessage = "Only opset1 PriorBox operation is supported";
return false;
}
} catch (...) {
return false;
}
return true;
}
MKLDNNPriorBoxNode::MKLDNNPriorBoxNode(
const std::shared_ptr<ngraph::Node>& op,
const mkldnn::engine& eng,
MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(op, eng, cache) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}
const auto priorBox = std::dynamic_pointer_cast<const ngraph::opset1::PriorBox>(op);
const ngraph::opset1::PriorBox::Attributes& attrs = priorBox->get_attrs();
offset = attrs.offset;
step = attrs.step;
min_size = attrs.min_size;
max_size = attrs.max_size;
flip = attrs.flip;
clip = attrs.clip;
scale_all_sizes = attrs.scale_all_sizes;
fixed_size = attrs.fixed_size;
fixed_ratio = attrs.fixed_ratio;
density = attrs.density;
bool exist;
aspect_ratio.push_back(1.0f);
for (float aspect_ratio_item : attrs.aspect_ratio) {
exist = false;
if (std::fabs(aspect_ratio_item) < std::numeric_limits<float>::epsilon()) {
THROW_ERROR << "Aspect_ratio param can't be equal to zero";
}
for (float _aspect_ratio : aspect_ratio) {
if (fabs(aspect_ratio_item - _aspect_ratio) < 1e-6) {
exist = true;
break;
}
}
if (exist) {
continue;
}
aspect_ratio.push_back(aspect_ratio_item);
if (flip) {
aspect_ratio.push_back(1.0f / aspect_ratio_item);
}
}
number_of_priors = ngraph::opset1::PriorBox::number_of_priors(attrs);
if (attrs.variance.size() == 1 || attrs.variance.size() == 4) {
for (float i : attrs.variance) {
if (i < 0) {
THROW_ERROR << "Variance must be > 0.";
}
variance.push_back(i);
}
} else if (attrs.variance.empty()) {
variance.push_back(0.1f);
} else {
THROW_ERROR << "Wrong number of variance values. Not less than 1 and more than 4 variance values.";
}
}
bool MKLDNNPriorBoxNode::needShapeInfer() const {
auto& memory = getChildEdgeAt(0)->getMemoryPtr();
if (memory->GetShape().isDynamic()) {
return true;
}
const auto& outputShape = memory->GetShape().getStaticDims();
const int* in_data = reinterpret_cast<int*>(memory->GetPtr());
const int h = in_data[0];
const int w = in_data[1];
const auto output = static_cast<size_t>(4 * h * w * number_of_priors);
return outputShape[1] != output;
}
std::vector<VectorDims> MKLDNNPriorBoxNode::shapeInfer() const {
const int* in_data = reinterpret_cast<int*>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
const int H = in_data[0];
const int W = in_data[1];
const auto output = static_cast<size_t>(4 * H * W * number_of_priors);
return {{2, output}};
}
bool MKLDNNPriorBoxNode::needPrepareParams() const {
return false;
}
void MKLDNNPriorBoxNode::initSupportedPrimitiveDescriptors() {
if (!supportedPrimitiveDescriptors.empty())
return;
addSupportedPrimDesc(
{{LayoutType::ncsp, Precision::I32}, {LayoutType::ncsp, Precision::I32}},
{{LayoutType::ncsp, Precision::FP32}},
impl_desc_type::ref_any);
}
void MKLDNNPriorBoxNode::createPrimitive() {
if (inputShapesDefined()) {
if (needPrepareParams())
prepareParams();
updateLastInputDims();
}
}
void MKLDNNPriorBoxNode::execute(mkldnn::stream strm) {
const int* in_data = reinterpret_cast<int*>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
const int H = in_data[0];
const int W = in_data[1];
const int* in_image = reinterpret_cast<int*>(getParentEdgeAt(1)->getMemoryPtr()->GetPtr());
const int IH = in_image[0];
const int IW = in_image[1];
const int OH = 4 * H * W * number_of_priors;
const int OW = 1;
float* dst_data = reinterpret_cast<float*>(getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
float step_ = step;
auto min_size_ = min_size;
if (!scale_all_sizes) {
// mxnet-like PriorBox
if (step_ == -1)
step_ = 1.f * IH / H;
else
step_ *= IH;
for (auto& size : min_size_)
size *= IH;
}
int64_t idx = 0;
float center_x, center_y, box_width, box_height, step_x, step_y;
float IWI = 1.0f / static_cast<float>(IW);
float IHI = 1.0f / static_cast<float>(IH);
if (step_ == 0) {
step_x = static_cast<float>(IW) / W;
step_y = static_cast<float>(IH) / H;
} else {
step_x = step_;
step_y = step_;
}
auto calculate_data =
[&dst_data, &IWI, &IHI, &idx](float center_x, float center_y, float box_width, float box_height, bool clip) {
if (clip) {
// order: xmin, ymin, xmax, ymax
dst_data[idx++] = clip_less((center_x - box_width) * IWI, 0);
dst_data[idx++] = clip_less((center_y - box_height) * IHI, 0);
dst_data[idx++] = clip_great((center_x + box_width) * IWI, 1);
dst_data[idx++] = clip_great((center_y + box_height) * IHI, 1);
} else {
dst_data[idx++] = (center_x - box_width) * IWI;
dst_data[idx++] = (center_y - box_height) * IHI;
dst_data[idx++] = (center_x + box_width) * IWI;
dst_data[idx++] = (center_y + box_height) * IHI;
}
};
for (int64_t h = 0; h < H; ++h) {
for (int64_t w = 0; w < W; ++w) {
if (step_ == 0) {
center_x = (w + 0.5f) * step_x;
center_y = (h + 0.5f) * step_y;
} else {
center_x = (offset + w) * step_;
center_y = (offset + h) * step_;
}
for (size_t s = 0; s < fixed_size.size(); ++s) {
auto fixed_size_ = static_cast<size_t>(fixed_size[s]);
box_width = box_height = fixed_size_ * 0.5f;
if (!fixed_ratio.empty()) {
for (float ar : fixed_ratio) {
auto density_ = static_cast<int64_t>(density[s]);
auto shift = static_cast<int64_t>(fixed_size[s] / density_);
ar = std::sqrt(ar);
float box_width_ratio = fixed_size[s] * 0.5f * ar;
float box_height_ratio = fixed_size[s] * 0.5f / ar;
for (int64_t r = 0; r < density_; ++r) {
for (int64_t c = 0; c < density_; ++c) {
float center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
float center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
calculate_data(center_x_temp, center_y_temp, box_width_ratio, box_height_ratio, true);
}
}
}
} else {
if (!density.empty()) {
auto density_ = static_cast<int64_t>(density[s]);
auto shift = static_cast<int64_t>(fixed_size[s] / density_);
for (int64_t r = 0; r < density_; ++r) {
for (int64_t c = 0; c < density_; ++c) {
float center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
float center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
calculate_data(center_x_temp, center_y_temp, box_width, box_height, true);
}
}
}
// Rest of priors
for (float ar : aspect_ratio) {
if (fabs(ar - 1.) < 1e-6) {
continue;
}
auto density_ = static_cast<int64_t>(density[s]);
auto shift = static_cast<int64_t>(fixed_size[s] / density_);
ar = std::sqrt(ar);
float box_width_ratio = fixed_size[s] * 0.5f * ar;
float box_height_ratio = fixed_size[s] * 0.5f / ar;
for (int64_t r = 0; r < density_; ++r) {
for (int64_t c = 0; c < density_; ++c) {
float center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
float center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
calculate_data(center_x_temp, center_y_temp, box_width_ratio, box_height_ratio, true);
}
}
}
}
}
for (size_t ms_idx = 0; ms_idx < min_size_.size(); ms_idx++) {
box_width = min_size_[ms_idx] * 0.5f;
box_height = min_size_[ms_idx] * 0.5f;
calculate_data(center_x, center_y, box_width, box_height, false);
if (max_size.size() > ms_idx) {
box_width = box_height = std::sqrt(min_size_[ms_idx] * max_size[ms_idx]) * 0.5f;
calculate_data(center_x, center_y, box_width, box_height, false);
}
if (scale_all_sizes || (!scale_all_sizes && (ms_idx == min_size_.size() - 1))) {
size_t s_idx = scale_all_sizes ? ms_idx : 0;
for (float ar : aspect_ratio) {
if (std::fabs(ar - 1.0f) < 1e-6) {
continue;
}
ar = std::sqrt(ar);
box_width = min_size_[s_idx] * 0.5f * ar;
box_height = min_size_[s_idx] * 0.5f / ar;
calculate_data(center_x, center_y, box_width, box_height, false);
}
}
}
}
}
if (clip) {
parallel_for((H * W * number_of_priors * 4), [&](size_t i) {
dst_data[i] = (std::min)((std::max)(dst_data[i], 0.0f), 1.0f);
});
}
uint64_t channel_size = OH * OW;
if (variance.size() == 1) {
parallel_for(channel_size, [&](size_t i) {
dst_data[i + channel_size] = variance[0];
});
} else {
parallel_for(H * W * number_of_priors, [&](size_t i) {
for (size_t j = 0; j < 4; ++j) {
dst_data[i * 4 + j + channel_size] = variance[j];
}
});
}
}
bool MKLDNNPriorBoxNode::created() const {
return getType() == PriorBox;
}
REG_MKLDNN_PRIM_FOR(MKLDNNPriorBoxNode, PriorBox)
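
The output written by execute() above is laid out as two equally sized blocks: box coordinates followed by per-box variances. A consumer-side sketch, with the function and parameter names being assumptions rather than part of the commit:

#include <cstddef>
// channel_size == 4 * H * W * number_of_priors, i.e. the second output dimension.
static void readPrior(const float* dst_data, std::size_t channel_size, std::size_t box_index,
                      float box[4], float var[4]) {
    for (std::size_t j = 0; j < 4; ++j) {
        box[j] = dst_data[4 * box_index + j];                 // xmin, ymin, xmax, ymax
        var[j] = dst_data[channel_size + 4 * box_index + j];  // per-coordinate variance
    }
}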


@ -0,0 +1,52 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <vector>
#include <ie_common.h>
#include <mkldnn_node.h>
namespace MKLDNNPlugin {
class MKLDNNPriorBoxNode : public MKLDNNNode {
public:
MKLDNNPriorBoxNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void createPrimitive() override;
void execute(mkldnn::stream strm) override;
bool created() const override;
bool needShapeInfer() const override;
std::vector<VectorDims> shapeInfer() const override;
bool needPrepareParams() const override;
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
private:
float offset;
float step;
std::vector<float> min_size;
std::vector<float> max_size;
bool flip;
bool clip;
bool scale_all_sizes;
std::vector<float> fixed_size;
std::vector<float> fixed_ratio;
std::vector<float> density;
std::vector<float> aspect_ratio;
std::vector<float> variance;
int number_of_priors;
};
} // namespace MKLDNNPlugin


@ -138,7 +138,8 @@ std::vector<std::string> disabledTestPatterns() {
// bad accuracy
R"(.*smoke_FakeQuantizeLayerCPUTest_Decompos.
*IS=_TS=\(\(4\.5\.6\.7\)\)_RS=\(\(1\.1\.6\.1\)\)_\(\(1\.5\.6\.1\)\)_\(\(1\.1\.1\.1\)\)_\(\(1\.1\.6\.1\)\).*)",
// Issue: 69222
R"(.*smoke_PriorBoxClustered.*PriorBoxClusteredLayerCPUTest.*_netPRC=f16_.*)",
// Issue: 71121
R"(.*smoke_Proposal*.*TS=\(2.*)",
// TODO : CVS-69533


@ -0,0 +1,229 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <sstream>
#include <vector>
#include <openvino/core/partial_shape.hpp>
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
using priorBoxSpecificParams = std::tuple<
std::vector<float>, // min_size
std::vector<float>, // max_size
std::vector<float>, // aspect_ratio
std::vector<float>, // density
std::vector<float>, // fixed_ratio
std::vector<float>, // fixed_size
bool, // clip
bool, // flip
float, // step
float, // offset
std::vector<float>, // variance
bool>; // scale_all_sizes
typedef std::tuple<
priorBoxSpecificParams,
ov::test::ElementType, // net precision
ov::test::ElementType, // Input precision
ov::test::ElementType, // Output precision
InferenceEngine::Layout, // Input layout
InferenceEngine::Layout, // Output layout
ov::test::InputShape, // input shape
ov::test::InputShape, // image shape
std::string> priorBoxLayerParams;
class PriorBoxLayerCPUTest : public testing::WithParamInterface<priorBoxLayerParams>,
virtual public SubgraphBaseTest, public CPUTestsBase {
public:
static std::string getTestCaseName(const testing::TestParamInfo<priorBoxLayerParams>& obj) {
ov::test::ElementType netPrecision;
ov::test::ElementType inPrc, outPrc;
InferenceEngine::Layout inLayout, outLayout;
ov::test::InputShape inputShapes;
ov::test::InputShape imageShapes;
std::string targetDevice;
priorBoxSpecificParams specParams;
std::tie(specParams,
netPrecision,
inPrc, outPrc, inLayout, outLayout,
inputShapes,
imageShapes,
targetDevice) = obj.param;
ngraph::op::PriorBoxAttrs attributes;
std::tie(
attributes.min_size,
attributes.max_size,
attributes.aspect_ratio,
attributes.density,
attributes.fixed_ratio,
attributes.fixed_size,
attributes.clip,
attributes.flip,
attributes.step,
attributes.offset,
attributes.variance,
attributes.scale_all_sizes) = specParams;
std::ostringstream result;
const char separator = '_';
result << "IS=" << inputShapes << separator;
result << "imageS=" << imageShapes << separator;
result << "netPRC=" << netPrecision << separator;
result << "inPRC=" << inPrc << separator;
result << "outPRC=" << outPrc << separator;
result << "inL=" << inLayout << separator;
result << "outL=" << outLayout << separator;
result << "min_size=" << CommonTestUtils::vec2str(attributes.min_size) << separator;
result << "max_size=" << CommonTestUtils::vec2str(attributes.max_size)<< separator;
result << "aspect_ratio=" << CommonTestUtils::vec2str(attributes.aspect_ratio)<< separator;
result << "density=" << CommonTestUtils::vec2str(attributes.density)<< separator;
result << "fixed_ratio=" << CommonTestUtils::vec2str(attributes.fixed_ratio)<< separator;
result << "fixed_size=" << CommonTestUtils::vec2str(attributes.fixed_size)<< separator;
result << "variance=" << CommonTestUtils::vec2str(attributes.variance)<< separator;
result << "step=" << attributes.step << separator;
result << "offset=" << attributes.offset << separator;
result << "clip=" << attributes.clip << separator;
result << "flip=" << attributes.flip<< separator;
result << "scale_all_sizes=" << attributes.scale_all_sizes << separator;
result << "trgDev=" << targetDevice;
return result.str();
}
protected:
void SetUp() override {
priorBoxSpecificParams specParams;
InferenceEngine::Layout inLayout;
InferenceEngine::Layout outLayout;
ov::test::ElementType netPrecision;
ov::test::ElementType inPrc;
ov::test::ElementType outPrc;
ov::test::InputShape inputShapes;
ov::test::InputShape imageShapes;
std::tie(specParams, netPrecision,
inPrc, outPrc, inLayout, outLayout,
inputShapes, imageShapes, targetDevice) = GetParam();
selectedType = makeSelectedTypeStr("ref", inPrc);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({ inputShapes, imageShapes });
ngraph::op::PriorBoxAttrs attributes;
std::tie(
attributes.min_size,
attributes.max_size,
attributes.aspect_ratio,
attributes.density,
attributes.fixed_ratio,
attributes.fixed_size,
attributes.clip,
attributes.flip,
attributes.step,
attributes.offset,
attributes.variance,
attributes.scale_all_sizes) = specParams;
auto params = ngraph::builder::makeDynamicParams(netPrecision, inputDynamicShapes);
auto shape_of_1 = std::make_shared<ngraph::opset3::ShapeOf>(params[0]);
auto shape_of_2 = std::make_shared<ngraph::opset3::ShapeOf>(params[1]);
auto priorBox = std::make_shared<ngraph::op::PriorBox>(
shape_of_1,
shape_of_2,
attributes);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(priorBox)};
function = std::make_shared <ngraph::Function>(results, params, "priorBox");
}
};
TEST_P(PriorBoxLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
CheckPluginRelatedResults(executableNetwork, "PriorBox");
}
namespace {
const std::vector<ov::test::ElementType> netPrecisions = {
ov::test::ElementType::i32,
ov::test::ElementType::u64};
const std::vector<std::vector<float>> min_sizes = {{256.0f}};
const std::vector<std::vector<float>> max_sizes = {{315.0f}};
const std::vector<std::vector<float>> aspect_ratios = {{2.0f}};
const std::vector<std::vector<float>> densities = {{1.0f}};
const std::vector<std::vector<float>> fixed_ratios = {{}};
const std::vector<std::vector<float>> fixed_sizes = {{}};
const std::vector<bool> clips = {false, true};
const std::vector<bool> flips = {false, true};
const std::vector<float> steps = {1.0f};
const std::vector<float> offsets = {0.0f};
const std::vector<std::vector<float>> variances = {{}};
const std::vector<bool> scale_all_sizes = { false, true};
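// Each ov::test::InputShape below is a pair: the (possibly dynamic) shape used to
// build the model and the list of static shapes fed at run time. For example, the
// second entry creates a fully dynamic 2D input and then runs it with 300x300 and
// 150x150 feature maps.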
const std::vector<ov::test::InputShape> inputShape = {
{{300, 300}, {{300, 300}}},
{{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{300, 300}, {150, 150}}},
{{{150, 300}, {150, 300}}, {{300, 300}, {150, 150}}}
};
const std::vector<ov::test::InputShape> imageShape = {
{{32, 32}, {{32, 32}}},
{{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{32, 32}, {16, 16}}},
{{{16, 32}, {16, 32}}, {{32, 32}, {16, 16}}}
};
const auto layerSpecificParams = ::testing::Combine(
::testing::ValuesIn(min_sizes),
::testing::ValuesIn(max_sizes),
::testing::ValuesIn(aspect_ratios),
::testing::ValuesIn(densities),
::testing::ValuesIn(fixed_ratios),
::testing::ValuesIn(fixed_sizes),
::testing::ValuesIn(clips),
::testing::ValuesIn(flips),
::testing::ValuesIn(steps),
::testing::ValuesIn(offsets),
::testing::ValuesIn(variances),
::testing::ValuesIn(scale_all_sizes));
INSTANTIATE_TEST_SUITE_P(smoke_PriorBox, PriorBoxLayerCPUTest,
::testing::Combine(
layerSpecificParams,
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::ElementType::undefined),
::testing::Values(ov::test::ElementType::undefined),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputShape),
::testing::ValuesIn(imageShape),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
PriorBoxLayerCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions


@ -0,0 +1,228 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <sstream>
#include <vector>
#include <openvino/core/partial_shape.hpp>
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
typedef std::tuple<
std::vector<float>, // widths
std::vector<float>, // heights
bool, // clip
float, // step_width
float, // step_height
float, // step
float, // offset
std::vector<float>> priorBoxClusteredSpecificParams;
typedef std::tuple<
priorBoxClusteredSpecificParams,
ov::test::ElementType, // net precision
ov::test::ElementType, // Input precision
ov::test::ElementType, // Output precision
InferenceEngine::Layout, // Input layout
InferenceEngine::Layout, // Output layout
ov::test::InputShape, // input shape
ov::test::InputShape, // image shape
std::string> priorBoxClusteredLayerParams;
class PriorBoxClusteredLayerCPUTest : public testing::WithParamInterface<priorBoxClusteredLayerParams>,
virtual public SubgraphBaseTest, public CPUTestsBase {
public:
static std::string getTestCaseName(const testing::TestParamInfo<priorBoxClusteredLayerParams>& obj) {
ov::test::ElementType netPrecision;
ov::test::ElementType inPrc, outPrc;
InferenceEngine::Layout inLayout, outLayout;
ov::test::InputShape inputShapes, imageShapes;
std::string targetDevice;
priorBoxClusteredSpecificParams specParams;
std::tie(specParams,
netPrecision,
inPrc, outPrc, inLayout, outLayout,
inputShapes,
imageShapes,
targetDevice) = obj.param;
ngraph::op::PriorBoxClusteredAttrs attributes;
std::tie(
attributes.widths,
attributes.heights,
attributes.clip,
attributes.step_widths,
attributes.step_heights,
attributes.step,
attributes.offset,
attributes.variances) = specParams;
std::ostringstream result;
const char separator = '_';
result << "IS=" << inputShapes << separator;
result << "imageS=" << imageShapes << separator;
result << "netPRC=" << netPrecision << separator;
result << "inPRC=" << inPrc << separator;
result << "outPRC=" << outPrc << separator;
result << "inL=" << inLayout << separator;
result << "outL=" << outLayout << separator;
result << "widths=" << CommonTestUtils::vec2str(attributes.widths) << separator;
result << "heights=" << CommonTestUtils::vec2str(attributes.heights) << separator;
result << "variances=";
if (attributes.variances.empty())
result << "()" << separator;
else
result << CommonTestUtils::vec2str(attributes.variances) << separator;
result << "stepWidth=" << attributes.step_widths << separator;
result << "stepHeight=" << attributes.step_heights << separator;
result << "step=" << attributes.step << separator;
result << "offset=" << attributes.offset << separator;
result << "clip=" << std::boolalpha << attributes.clip << separator;
result << "trgDev=" << targetDevice;
return result.str();
}
protected:
void SetUp() override {
priorBoxClusteredSpecificParams specParams;
InferenceEngine::Layout inLayout;
InferenceEngine::Layout outLayout;
ov::test::ElementType netPrecision;
ov::test::ElementType inPrc;
ov::test::ElementType outPrc;
ov::test::InputShape inputShapes;
ov::test::InputShape imageShapes;
std::tie(specParams, netPrecision,
inPrc, outPrc, inLayout, outLayout,
inputShapes, imageShapes, targetDevice) = GetParam();
selectedType = makeSelectedTypeStr("ref", inPrc);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({ inputShapes, imageShapes });
ngraph::op::PriorBoxClusteredAttrs attributes;
std::tie(
attributes.widths,
attributes.heights,
attributes.clip,
attributes.step_widths,
attributes.step_heights,
attributes.step,
attributes.offset,
attributes.variances) = specParams;
auto params = ngraph::builder::makeDynamicParams(netPrecision, { inputShapes.first, imageShapes.first });
auto shape_of_1 = std::make_shared<ngraph::opset3::ShapeOf>(params[0]);
auto shape_of_2 = std::make_shared<ngraph::opset3::ShapeOf>(params[1]);
auto priorBoxClustered = std::make_shared<ngraph::op::PriorBoxClustered>(
shape_of_1,
shape_of_2,
attributes);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(priorBoxClustered) };
function = std::make_shared<ngraph::Function>(results, params, "priorBoxClustered");
}
};
TEST_P(PriorBoxClusteredLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
CheckPluginRelatedResults(executableNetwork, "PriorBoxClustered");
}
namespace {
// Common params
const std::vector<ov::test::ElementType> netPrecisions = {
ov::test::ElementType::f32,
ov::test::ElementType::f16
};
const std::vector<std::vector<float>> widths = {
{ 5.12f, 14.6f, 13.5f },
{ 7.0f, 8.2f, 33.39f }
};
const std::vector<std::vector<float>> heights = {
{ 15.12f, 15.6f, 23.5f },
{ 10.0f, 16.2f, 36.2f }
};
const std::vector<float> step_widths = {
0.0f, 2.0f
};
const std::vector<float> step_heights = {
0.0f, 1.5f
};
const std::vector<float> step = {
0.0f, 1.0f, 1.5f
};
const std::vector<float> offsets = {
0.5f
};
const std::vector<std::vector<float>> variances = {
{0.1f, 0.1f, 0.2f, 0.2f},
{0.2f},
{}
};
const std::vector<bool> clips = {
true, false
};
const auto layerSpecificParams = ::testing::Combine(
::testing::ValuesIn(widths),
::testing::ValuesIn(heights),
::testing::ValuesIn(clips),
::testing::ValuesIn(step_widths),
::testing::ValuesIn(step_heights),
::testing::ValuesIn(step),
::testing::ValuesIn(offsets),
::testing::ValuesIn(variances)
);
const std::vector<ov::test::InputShape> inputShapes = {
{{4, 4}, {{4, 4}}},
{{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{4, 4}, {8, 8}}},
{{{4, 8}, {4, 8}}, {{4, 4}, {8, 8}}}
};
const std::vector<ov::test::InputShape> imageShapes = {
{{50, 50}, {{50, 50}}},
{{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{50, 50}, {100, 100}}},
{{{50, 100}, {50, 100}}, {{50, 50}, {100, 100}}}
};
INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered, PriorBoxClusteredLayerCPUTest,
::testing::Combine(
layerSpecificParams,
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::ElementType::undefined),
::testing::Values(ov::test::ElementType::undefined),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(imageShapes),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
PriorBoxClusteredLayerCPUTest::getTestCaseName
);
} // namespace
} // namespace CPULayerTestsDefinitions


@ -266,7 +266,13 @@ void SubgraphBaseTest::validate() {
void SubgraphBaseTest::init_input_shapes(const std::vector<InputShape>& shapes) {
size_t targetStaticShapeSize = shapes.front().second.size();
for (size_t i = 1; i < shapes.size(); ++i) {
if (targetStaticShapeSize < shapes[i].second.size()) {
targetStaticShapeSize = shapes[i].second.size();
}
}
targetStaticShapes.resize(targetStaticShapeSize);
for (const auto& shape : shapes) {
auto dynShape = shape.first;
if (dynShape.rank() == 0) {
@ -274,10 +280,8 @@ void SubgraphBaseTest::init_input_shapes(const std::vector<InputShape>& shapes)
dynShape = shape.second.front();
}
inputDynamicShapes.push_back(dynShape);
ASSERT_EQ(shape.second.size(), targetStaticShapeSize)
<< "Target static count shapes should be the same for all inputs";
for (size_t i = 0; i < shape.second.size(); ++i) {
targetStaticShapes[i].push_back(shape.second.at(i));
for (size_t i = 0; i < targetStaticShapeSize; ++i) {
targetStaticShapes[i].push_back(i < shape.second.size() ? shape.second.at(i) : shape.second.back());
}
}
}
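
The relaxed init_input_shapes() contract above can be summarised with a hypothetical two-input example (shapes chosen for illustration): inputs no longer have to provide the same number of target static shapes, and an input with fewer entries reuses its last one.

// input 0: {{-1, -1}, {{300, 300}, {150, 150}}}  -> two target static shapes
// input 1: {{32, 32}, {{32, 32}}}                -> one target static shape
// targetStaticShapes after init_input_shapes():
//   iteration 0: {300, 300}, {32, 32}
//   iteration 1: {150, 150}, {32, 32}   // input 1 falls back to its last shape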


@ -59,8 +59,9 @@ void PriorBoxLayerTest::SetUp() {
inPrc, outPrc, inLayout, outLayout,
inputShapes, imageShapes, targetDevice) = GetParam();
std::tie(min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size,
clip, flip, step, offset, variance, scale_all_sizes,
std::tie(min_size, max_size, aspect_ratio,
density, fixed_ratio, fixed_size, clip,
flip, step, offset, variance, scale_all_sizes,
min_max_aspect_ratios_order) = specParams;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);