[IE][VPU]: MVN-6 native partial support (#4238)
* Add partial support for the MVN-6 operation to the VPU plugin (support is restricted to what the MVN-1 interface can express)
* Disable MVN-6 decomposition for the VPU plugin
* Enable test cases for this operation
* Disable the deprecated MVN tests while the investigation is ongoing
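For context, the supported subset corresponds to MVN-6 nodes that can be expressed through the MVN-1 interface: 3D or 4D FP16 input, a constant i32 axes input that covers H and W (optionally C) but not N, and eps_mode == "outside_sqrt". Below is a minimal sketch (not part of this change) of such a node, assuming the standard ngraph opset6 API of this timeframe; the helper name makeSupportedMvn6 is only for illustration.

#include <memory>
#include <vector>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset6.hpp>

// Builds an MVN-6 node that falls inside the subset supported by this commit.
std::shared_ptr<ngraph::Function> makeSupportedMvn6() {
    auto data = std::make_shared<ngraph::opset6::Parameter>(
        ngraph::element::f16, ngraph::Shape{1, 3, 256, 384});
    // Axes {2, 3} reduce over H and W only, the MVN-1 equivalent of across_channels = 0;
    // {1, 2, 3} would be the across_channels = 1 equivalent.
    auto axes = ngraph::opset6::Constant::create(
        ngraph::element::i32, ngraph::Shape{2}, std::vector<int32_t>{2, 3});
    auto mvn = std::make_shared<ngraph::opset6::MVN>(
        data, axes, /*normalize_variance=*/true, /*eps=*/1e-8f,
        ngraph::op::MVNEpsMode::OUTSIDE_SQRT);
    return std::make_shared<ngraph::Function>(ngraph::NodeVector{mvn},
                                              ngraph::ParameterVector{data});
}

Axes that omit H or W, or that include N, fall outside the supported subset and are rejected by parseMVN in the diff below.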
@@ -47,6 +47,7 @@
#include <legacy/transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.hpp>
#include <vpu/ngraph/transformations/extract_dynamic_batch/extract_dynamic_batch.hpp>
#include <vpu/ngraph/transformations/merge_gather_gather_elements.hpp>
+#include <transformations/op_conversions/mvn6_decomposition.hpp>

namespace vpu {

@@ -195,6 +196,7 @@ ie::CNNNetwork FrontEnd::convertNetwork(ie::CNNNetwork& network) {
    pass_config->disable<ngraph::pass::SoftPlusDecomposition>();
    pass_config->disable<ngraph::pass::ConvertMinimum>();
    pass_config->disable<ngraph::pass::HSwishDecomposition>();
+    pass_config->disable<ngraph::pass::MVN6Decomposition>();

    auto transformationPredicate = [](const std::shared_ptr<const ngraph::Node>& node) -> bool {
        return !!std::dynamic_pointer_cast<const ngraph::vpu::op::DynamicShapeResolver>(node->input_value(0).get_node_shared_ptr());
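With ngraph::pass::MVN6Decomposition disabled, the plugin receives the MVN-6 op intact and lowers it to its own MVN stage instead of a subgraph of reduce/subtract/divide nodes. For reference, here is a minimal standalone sketch of the computation for one reduction group in outside_sqrt mode (the only eps mode the parser accepts); mvnReference is a hypothetical helper written for this note, not plugin code.

#include <cmath>
#include <vector>

// Reference MVN over one reduction group (outside_sqrt mode): subtract the mean,
// then optionally divide by sqrt(variance) + eps.
void mvnReference(std::vector<float>& x, bool normalizeVariance, float eps) {
    if (x.empty()) return;
    double mean = 0.0;
    for (float v : x) mean += v;
    mean /= x.size();

    double variance = 0.0;
    for (float v : x) variance += (v - mean) * (v - mean);
    variance /= x.size();

    const double denom = normalizeVariance ? std::sqrt(variance) + eps : 1.0;
    for (float& v : x) v = static_cast<float>((v - mean) / denom);
}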
@@ -42,7 +42,7 @@ private:
    }

    void initialCheckImpl() const override {
-        assertInputsOutputsTypes(this, {{DataType::FP16}}, {{DataType::FP16}});
+        assertInputsOutputsTypes(this, {{DataType::FP16}, {DataType::S32}}, {{DataType::FP16}});
    }

    void serializeParamsImpl(BlobSerializer& serializer) const override {
@@ -66,17 +66,45 @@

} // namespace

-void FrontEnd::parseMVN(const Model& model, const ie::CNNLayerPtr& _layer, const DataVector& inputs, const DataVector& outputs) const {
-    IE_ASSERT(inputs.size() == 1);
-    IE_ASSERT(outputs.size() == 1);
+void FrontEnd::parseMVN(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const {
+    VPU_THROW_UNLESS(inputs.size() == 2, "%d inputs provided to %s layer, but 2 expected.",
+                     inputs.size(), layer->name);
+    VPU_THROW_UNLESS(outputs.size() == 1, "%d outputs provided to %s layer, but 1 expected.",
+                     outputs.size(), layer->name);

-    auto layer = std::dynamic_pointer_cast<ie::MVNLayer>(_layer);
-    IE_ASSERT(layer != nullptr);
+    const auto& input = inputs[0];
+    const auto ndims = input->desc().numDims();
+    VPU_THROW_UNLESS(ndims == 3 || ndims == 4, "%d input rank provided to %s layer, but only 3D and 4D supported.",
+                     ndims, layer->name);
+
+    const auto& indices = inputs[1];
+    const auto indicesSize = indices->desc().totalDimSize();
+    const auto indicesPtr = indices->content()->get<int>();
+
+    const auto& getDimFromAxis = [](int ndims, int axisIndex) -> Dim {
+        return DimsOrder::fromNumDims(ndims).toPermutation()[ndims - axisIndex - 1];
+    };
+    DimSet axes;
+    for (int i = 0; i < indicesSize; i++) {
+        axes.insert(getDimFromAxis(ndims, indicesPtr[i]));
+    }
+
+    VPU_THROW_UNLESS(!axes.count(Dim::N) && axes.count(Dim::H) && axes.count(Dim::W),
+                     "Unsupported combination of indices in layer \"%s\". "
+                     "Only across channel and full batch supported.", layer->name);
+    const auto acrossChannels = axes.count(Dim::C) != 0;
+
+    const auto normVariance = layer->GetParamAsBool("normalize_variance");
+    const auto eps = layer->GetParamAsFloat("eps");
+    const auto epsMode = layer->GetParamAsString("eps_mode", "outside_sqrt");
+    VPU_THROW_UNLESS(epsMode == "outside_sqrt",
+                     "eps_mode == %s provided to %s layer, but only eps_mode == \"outside_sqrt\" supported.",
+                     epsMode, layer->name);

    auto stage = model->addNewStage<MVNStage>(layer->name, StageType::MVN, layer, inputs, outputs);
-    stage->attrs().set<int>("normalize", layer->normalize);
-    stage->attrs().set<int>("across_channels", layer->across_channels);
-    stage->attrs().set<float>("eps", layer->GetParamAsFloat("eps", 0.0f));
+    stage->attrs().set<int>("normalize", normVariance);
+    stage->attrs().set<int>("across_channels", acrossChannels);
+    stage->attrs().set<float>("eps", eps);
}

} // namespace vpu
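For clarity, here is a standalone sketch of the axis handling in parseMVN above, using plain chars instead of the plugin's Dim/DimsOrder types (dimFromAxis and acrossChannelsFromAxes are hypothetical helpers written for this note): an MVN-6 axis index counts from the outermost dimension, which is what the lookup through the reversed minor-to-major permutation resolves, so for a 4D input axis 0 is N, 1 is C, 2 is H, and 3 is W.

#include <set>
#include <stdexcept>
#include <string>

// Maps an MVN-6 axis index to a named dimension for 3D (CHW) or 4D (NCHW) inputs.
char dimFromAxis(int ndims, int axisIndex) {
    if (ndims != 3 && ndims != 4) throw std::runtime_error("only 3D and 4D handled here");
    if (axisIndex < 0 || axisIndex >= ndims) throw std::runtime_error("axis out of range");
    const std::string order = (ndims == 4) ? "NCHW" : "CHW";
    return order[axisIndex];
}

// The MVN-1 equivalence the parser enforces: axes must cover H and W, must not
// include N, and across_channels is simply whether C is in the set.
bool acrossChannelsFromAxes(const std::set<char>& axes) {
    if (axes.count('N') || !axes.count('H') || !axes.count('W'))
        throw std::runtime_error("unsupported axis combination for the native MVN stage");
    return axes.count('C') != 0;
}

So axes {2, 3} resolve to {H, W} (across_channels = 0) and {1, 2, 3} resolve to {C, H, W} (across_channels = 1), matching the comments in the test file added below.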
@@ -0,0 +1,63 @@
// Copyright (C) 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include "single_layer_tests/mvn.hpp"

using namespace LayerTestsDefinitions;

namespace {

const std::vector<std::vector<int>> indices_4D = {
    {2, 3},    // equivalent MVN-1 across_channel=0
    {1, 2, 3}  // equivalent MVN-1 across_channel=1
};

const std::vector<std::vector<int>> indices_3D = {
    {1, 2},    // equivalent MVN-1 across_channel=0
    {0, 1, 2}  // equivalent MVN-1 across_channel=1
};

const std::vector<InferenceEngine::SizeVector> input_shape_4D = {
    {3, 3, 51, 89},
    {1, 3, 256, 384},
    {1, 10, 5, 17},
    {1, 3, 8, 9}
};

const std::vector<InferenceEngine::SizeVector> input_shape_3D = {
    {1, 32, 17},
    {1, 37, 9}
};

const std::vector<float> eps = {
    1.0e-10, 1.0e-8, 1.0e-7, 1.0e-5, 1.0e-3
};

INSTANTIATE_TEST_CASE_P(smoke_MVN_4D, Mvn6LayerTest,
    ::testing::Combine(
        ::testing::ValuesIn(input_shape_4D),
        ::testing::Values(InferenceEngine::Precision::FP16),
        ::testing::Values(InferenceEngine::Precision::I32),
        ::testing::ValuesIn(indices_4D),
        ::testing::Values(false, true),
        ::testing::ValuesIn(eps),
        ::testing::Values("outside_sqrt"),
        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
    Mvn6LayerTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_MVN_3D, Mvn6LayerTest,
    ::testing::Combine(
        ::testing::ValuesIn(input_shape_3D),
        ::testing::Values(InferenceEngine::Precision::FP16),
        ::testing::Values(InferenceEngine::Precision::I32),
        ::testing::ValuesIn(indices_3D),
        ::testing::Values(false, true),
        ::testing::ValuesIn(eps),
        ::testing::Values("outside_sqrt"),
        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
    Mvn6LayerTest::getTestCaseName);

} // namespace
@@ -13,7 +13,7 @@ INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMVN_smoke,
    ::testing::Values(IRVersion::v7, IRVersion::v10),
    ::testing::ValuesIn(s_MVNCustomConfig)));

-TEST_F(myriadLayersTests_nightly, MVN_CHW_Input)
+TEST_F(myriadLayersTests_nightly, DISABLED_MVN_CHW_Input)
{
    std::string model = R"V0G0N(
        <net name="MVN" version="2" batch="1">
@@ -138,7 +138,7 @@ PRETTY_PARAM(Epsilon, float)

typedef myriadLayerTestBaseWithParam<std::tuple<Dims, AcrossChannels, Normalize, Epsilon, IRVersion, std::string>> myriadLayersTestsMVN_smoke;

-TEST_P(myriadLayersTestsMVN_smoke, MVN)
+TEST_P(myriadLayersTestsMVN_smoke, DISABLED_MVN)
{
    tensor_test_params dims = std::get<0>(GetParam());
    int acrossChannels = std::get<1>(GetParam());