[IE VPU] Dynamic Broadcast tests (#737)
* [IE VPU] Enable StaticShapeBroadcast tests
* [IE VPU] DSR: support case when shape is output and input for stage
* [IE VPU] Enable Broadcast and Transpose tests
* [IE VPU] DSR: fix typo
* [IE VPU] Add assertion for numConsumer in DSR
* [IE VPU] Added CheckMyriad2 helper method
* [IE VPU] New DSR assert for input->getInputTo
* [IE VPU] Fix Myriad2 tests bug
commit 18004bdb5e (parent 9fc818478a)
@@ -38,6 +38,8 @@ public:
    PartialShape getEvaluatedShape() const { return m_evaluatedOutputShape; }
    void setEvaluatedShape(const PartialShape& shape) { m_evaluatedOutputShape = shape; }

    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) override;

private:
    PartialShape m_evaluatedOutputShape;
};
@@ -141,6 +141,10 @@ bool StaticShapeBroadcast::visit_attributes(ngraph::AttributeVisitor& visitor) {
    return true;
}

bool StaticShapeBroadcast::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) {
    return ::ngraph::op::util::BroadcastBase::evaluate(outputs, inputs);
}

} // namespace op
} // namespace vpu
} // namespace ngraph
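Note: the new evaluate override simply forwards to BroadcastBase::evaluate, which is what lets the StaticShapeBroadcast tests added below use RefMode::INTERPRETER — nGraph can now compute reference outputs for the op directly. A minimal sketch of driving the override through the HostTensor interface follows; the helper function and its tensor handling are illustrative, not part of the commit.

    // Illustrative only: exercising the override via nGraph host tensors.
    #include <memory>
    #include <ngraph/runtime/host_tensor.hpp>
    #include <vpu/ngraph/operations/static_shape_broadcast.hpp>

    bool evaluateOnHost(const std::shared_ptr<ngraph::vpu::op::StaticShapeBroadcast>& broadcast) {
        ngraph::HostTensorVector inputs, outputs;

        // One host tensor per op input; a real caller must fill these buffers
        // (data, target shape, optional axes mapping) before evaluating.
        for (size_t i = 0; i < broadcast->get_input_size(); ++i) {
            inputs.push_back(std::make_shared<ngraph::runtime::HostTensor>(
                    broadcast->get_input_element_type(i), broadcast->get_input_shape(i)));
        }

        // Pre-allocated output tensor; evaluate() writes the broadcasted data here.
        outputs.push_back(std::make_shared<ngraph::runtime::HostTensor>(
                broadcast->get_output_element_type(0), broadcast->get_output_shape(0)));

        // Forwards to ngraph::op::util::BroadcastBase::evaluate.
        return broadcast->evaluate(outputs, inputs);
    }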
@@ -164,7 +164,7 @@ public:
    void parseConcat(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
    void parseSplit(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
    void parseStridedSlice(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
-    void parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
+    void parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs);

    //
    // Parser with data sharing
@@ -49,6 +49,10 @@ protected:
    void getBatchSupportInfoImpl(StageDataInfo<BatchSupport>& batchInfo) override {
    }

    StageSHAVEsRequirements getSHAVEsRequirementsImpl() const override {
        return StageSHAVEsRequirements::NotNeeded;
    }

    void initialCheckImpl() const override {
        const auto mode = attrs().getOrDefault<BroadcastMode>("mode", BroadcastMode::NUMPY);
        const auto& dataPrecision = input(0)->desc().type();
@@ -122,7 +126,7 @@ void FrontEnd::parseBroadcast(
        "{} layer with name {} and explicit mode must have 1D axesMapping tensor, "
        "actually provided {}D tensor",
        layer->type, layer->name, axesMappingDesc.numDims());
-    VPU_THROW_UNLESS(axesMappingDim == output->desc().numDims(),
+    VPU_THROW_UNLESS(axesMappingDim == inputs[0]->desc().numDims(),
        "{} layer with name {} and explicit mode must have axesMapping tensor with "
        "size equals to number of output dims, expected [{}], provided [{}]",
        layer->type, layer->name, output->desc().numDims(), axesMappingDim);
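Note: the corrected check compares the axesMapping length against the input rank rather than the output rank, which matches explicit-mode Broadcast semantics and the new test cases below (e.g. input {16}, target {1, 16, 50, 50}, axesMapping {1}). A hedged opset3 sketch of that case; the helper name is made up and not part of the commit.

    // Explicit-mode Broadcast: axesMapping has one entry per *input* dimension,
    // telling which output dimension each input dimension maps to.
    #include <memory>
    #include <ngraph/opsets/opset3.hpp>

    std::shared_ptr<ngraph::opset3::Broadcast> makeExplicitBroadcast() {
        const auto data = std::make_shared<ngraph::opset3::Parameter>(
                ngraph::element::f32, ngraph::Shape{16});
        const auto targetShape = ngraph::opset3::Constant::create(
                ngraph::element::i64, ngraph::Shape{4}, {1, 16, 50, 50});
        // Single entry because the input is 1D: input dim 0 -> output dim 1.
        const auto axesMapping = ngraph::opset3::Constant::create(
                ngraph::element::i64, ngraph::Shape{1}, {1});
        // The three-input constructor implies EXPLICIT broadcast mode.
        return std::make_shared<ngraph::opset3::Broadcast>(data, targetShape, axesMapping);
    }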
@@ -3,10 +3,11 @@
//

#include <vpu/frontend/frontend.hpp>
+#include <ngraph/node.hpp>

namespace vpu {

-void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const {
+void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) {
    VPU_THROW_UNLESS(inputs.size() == 2, "Error while parsing {} of type {}, got {} inputs, while {} were expected",
        layer->name, layer->type, inputs.size(), 2);
    const auto& data = inputs[0];
@@ -20,6 +21,22 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
    VPU_THROW_UNLESS(dataProducerEdge != nullptr, "Parsing layer {} of type {} failed: input with index {} (of name {}) must have a producer",
        layer->name, layer->type, 0, data->name());

    const auto ngraphNode = layer->getNode();
    VPU_THROW_UNLESS(!ngraphNode || ngraphNode->get_input_source_output(0).get_target_inputs().size() == 1,
        "Parsing layer {} of type {} failed: input with index {} (of name {}) must not be an input for any operation except current "
        "of type {}, actual number of operations for which data is input is {}. "
        "DynamicToStaticShape transformations should add {} operation after all operations with dynamic output as only "
        "consumer. All operations that were previously original output data consumers should now consume the output data "
        "from {}. Otherwise the consumer which was not redirected to {} output would process garbage data.",
        layer->name, layer->type, 0, data->name(), layer->type, ngraphNode->get_input_source_output(0).get_target_inputs().size(),
        layer->type, layer->type);
    VPU_THROW_UNLESS(data->consumerEdges().size() == 0,
        "Parsing layer {} of type {} failed: input with index {} (of name {}) must have no consumers, actual: {}. "
        "DynamicToStaticShape transformations should add {} operation after all operations with dynamic output as only "
        "consumer. All operations that were previously original output data consumers should now consume the output data "
        "from {}. Otherwise the consumer which was not redirected to {} output would process garbage data.",
        layer->name, layer->type, 0, data->name(), data->consumerEdges().size(), layer->type, layer->type, layer->type);

    VPU_THROW_UNLESS(shape->desc().numDims() == 1,
        "Parsing layer {} of type {} failed: input with index {} (of name {}) must have rank equal to {}, actual is {}",
        layer->name, layer->type, 0, shape->name(), 1, shape->desc().numDims());
@@ -50,6 +67,11 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
    // Create the second output with shape in case of dynamic output
    const auto& shapeOutput = model->addOutputData(dataOutput->name() + "@shape", shape->desc());

    bindData(shapeOutput, shape->origData());
    for (const auto& shapeConsumerEdge : shape->consumerEdges()) {
        model->replaceStageInput(shapeConsumerEdge, shapeOutput);
    }

    for (const auto& dataToShapeEdge : shape->childDataToShapeEdges()) {
        model->replaceDataToShapeParent(dataToShapeEdge, shapeOutput);
    }
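Note: the two new asserts encode the contract left behind by DynamicToStaticShape — the resolver must be the only consumer of the dynamic operation's data output, with every former consumer rewired to the resolver's output. A rough sketch of the expected nGraph pattern follows; the StaticShapeNonZero header path and the exact DynamicShapeResolver constructor signature are assumptions, not taken from this commit.

    // Illustrative sketch of the graph shape parseDSR now expects.
    #include <memory>
    #include <ngraph/opsets/opset3.hpp>
    #include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
    #include <vpu/ngraph/operations/static_shape_nonzero.hpp>

    std::shared_ptr<ngraph::Function> makeDsrPattern() {
        auto param = std::make_shared<ngraph::opset3::Parameter>(
                ngraph::element::f32, ngraph::Shape{1, 1000});

        // After DynamicToStaticShape, a dynamic op such as NonZero is replaced by
        // a static-shape counterpart that also reports the real output shape.
        auto staticNonZero = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(param);

        // The DSR must be the *only* consumer of the data output (output 0); every
        // op that used to read the dynamic output must read dsr->output(0) instead,
        // otherwise parseDSR throws and the stray consumer would see garbage data.
        auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
                staticNonZero->output(0), staticNonZero->output(1));

        auto result = std::make_shared<ngraph::opset3::Result>(dsr);
        return std::make_shared<ngraph::Function>(
                ngraph::ResultVector{result}, ngraph::ParameterVector{param});
    }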
@@ -0,0 +1,21 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <cstdlib>
#include <iostream>

namespace CommonTestUtils {
namespace vpu {

bool CheckMyriad2() {
    if (const auto& envVar = std::getenv("IE_VPU_MYRIADX")) {
        return std::stoi(envVar) == 0;
    }
    return true;
}

} // namespace vpu
} // namespace CommonTestUtils
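Note: CheckMyriad2() reports whether the suite targets a Myriad2 device — it returns true when IE_VPU_MYRIADX is unset or set to 0, and false otherwise (std::stoi would throw for a non-numeric value). A small usage sketch mirroring the guard added to the NonZero_Broadcast fixture later in this commit; the free-function wrapper is hypothetical and the includes are the ones those tests already use.

    // Hypothetical wrapper around the guard used by the tests in this commit:
    // Myriad2 runs additionally need reorder stages disabled.
    #include <map>
    #include <string>

    #include "vpu/private_plugin_config.hpp"
    #include "../common/myriad_common_test_utils.hpp"

    void applyMyriad2Workarounds(std::map<std::string, std::string>& configuration) {
        if (CommonTestUtils::vpu::CheckMyriad2()) {
            configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
        }
    }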
@@ -0,0 +1,120 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "vpu/ngraph/operations/static_shape_broadcast.hpp"

#include "vpu/private_plugin_config.hpp"

#include <functional_test_utils/layer_test_utils.hpp>
#include <functional_test_utils/blob_utils.hpp>
#include <ngraph/opsets/opset3.hpp>

#include <tuple>
#include <vector>
#include <string>
#include <memory>

using TensorShape = InferenceEngine::SizeVector;

using StaticShapeBroadcastParam = std::tuple<
        TensorShape,   // Input shape
        TensorShape,   // Target shape
        TensorShape>;  // Axes mapping

using StaticShapeBroadcastTestParam = std::tuple<
        StaticShapeBroadcastParam,       // Shapes param
        InferenceEngine::Precision,      // Input precision
        LayerTestsUtils::TargetDevice>;  // Device name

namespace LayerTestsDefinitions {

class StaticShapeBroadcastLayerTest : public testing::WithParamInterface<StaticShapeBroadcastTestParam>,
                                      public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<StaticShapeBroadcastTestParam>& obj) {
        StaticShapeBroadcastParam shapes;
        InferenceEngine::Precision inputPrecision;
        std::string targetDevice;
        std::tie(shapes, inputPrecision, targetDevice) = obj.param;

        const auto inputShape = std::get<0>(shapes);
        const auto targetShape = std::get<1>(shapes);
        const auto axesMapping = std::get<2>(shapes);

        std::ostringstream result;
        result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
        result << "TS=" << CommonTestUtils::vec2str(targetShape) << "_";
        if (!axesMapping.empty()) {
            result << "AM=" << CommonTestUtils::vec2str(axesMapping) << "_";
        }
        result << "inPrc=" << inputPrecision.name() << "_";
        result << "targetDevice=" << targetDevice;
        return result.str();
    }

protected:
    void SetUp() override {
        SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
        configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);

        StaticShapeBroadcastParam shapes;
        std::tie(shapes, inPrc, targetDevice) = this->GetParam();

        const auto inputShape = std::get<0>(shapes);
        const auto targetShape = std::get<1>(shapes);
        const auto axesMapping = std::get<2>(shapes);

        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc);

        const auto inputParam = std::make_shared<ngraph::opset3::Parameter>(
                ngPrc, ngraph::Shape(inputShape));
        const auto targetShapeConst = std::make_shared<ngraph::opset3::Constant>(
                ngraph::element::i64, ngraph::Shape{targetShape.size()}, targetShape);

        std::shared_ptr<ngraph::vpu::op::StaticShapeBroadcast> staticShapeBroadcast;
        if (axesMapping.empty()) {
            staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
                    inputParam, targetShapeConst);
        } else {
            const auto axesMappingConst = std::make_shared<ngraph::opset3::Constant>(
                    ngraph::element::i64, ngraph::Shape{axesMapping.size()}, axesMapping);
            staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
                    inputParam, targetShapeConst, axesMappingConst);
        }

        ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(staticShapeBroadcast->output(0))};
        function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{inputParam});
    }
};

TEST_P(StaticShapeBroadcastLayerTest, accuracy) {
    Run();
}

std::vector<StaticShapeBroadcastParam> broadcastParam = {
        std::make_tuple(TensorShape{ 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
        std::make_tuple(TensorShape{ 15, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
        std::make_tuple(TensorShape{ 15, 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
        std::make_tuple(TensorShape{ 16, 1, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
        std::make_tuple(TensorShape{ 16, 1, 14 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),
        std::make_tuple(TensorShape{ 16, 15, 1 }, TensorShape{ 2, 16, 15, 14 }, TensorShape{}),

        std::make_tuple(TensorShape{ 80 }, TensorShape{ 80, 1 }, TensorShape{ 0 }),
        std::make_tuple(TensorShape{ 16 }, TensorShape{ 1, 16, 50, 50 }, TensorShape{ 1 }),
        std::make_tuple(TensorShape{ 50, 50 }, TensorShape{ 1, 50, 50, 16 }, TensorShape{ 1, 2 }),
};

std::vector<InferenceEngine::Precision> broadcastPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::I32,
};

INSTANTIATE_TEST_CASE_P(accuracy, StaticShapeBroadcastLayerTest,
        ::testing::Combine(
                ::testing::ValuesIn(broadcastParam),
                ::testing::ValuesIn(broadcastPrecisions),
                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
        StaticShapeBroadcastLayerTest::getTestCaseName);

} // namespace LayerTestsDefinitions
@@ -4,13 +4,16 @@

#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>

#include "vpu/private_plugin_config.hpp"

#include "../common/myriad_common_test_utils.hpp"
#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>

namespace {

using TensorType = ngraph::element::Type;
-using TensorShape = ngraph::PartialShape;
+using TensorShape = ngraph::Shape;

using BroadcastExplicitTestParams = std::tuple<
        TensorType, TensorShape, LayerTestsUtils::TargetDevice>;
@@ -19,6 +22,13 @@ class NonZero_Broadcast : public testing::WithParamInterface<BroadcastExplicitTe
                          public LayerTestsUtils::LayerTestsCommon {
protected:
    void SetUp() override {
        SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
        configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
        // DISABLE_REORDER is needed for Myriad2 cases
        if (CommonTestUtils::vpu::CheckMyriad2()) {
            configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
        }

        const auto& parameters = GetParam();
        const auto& tensorType = std::get<0>(parameters);
        const auto& tensorShape = std::get<1>(parameters);
@@ -30,18 +40,17 @@ protected:
        const auto shapeOfNonZero = std::make_shared<ngraph::opset3::ShapeOf>(nonZero);

        const auto broadcastConstant = std::make_shared<ngraph::opset3::Constant>(
-                tensorType, ngraph::Shape{1}, 1);

+                tensorType, ngraph::Shape{tensorShape.size()}, 1);
        const auto axesMappingConstant = std::make_shared<ngraph::opset3::Constant>(
                ngraph::element::u64, ngraph::Shape{1}, 0);

        const auto broadcast = std::make_shared<ngraph::opset3::Broadcast>(
                broadcastConstant, shapeOfNonZero, axesMappingConstant);

-        const auto result = std::make_shared<ngraph::opset3::Result>(broadcast);
+        const auto resultBroadcast = std::make_shared<ngraph::opset3::Result>(broadcast);
+        const auto resultNonZero = std::make_shared<ngraph::opset3::Result>(nonZero->output(0));

        function = std::make_shared<ngraph::Function>(
-                ngraph::ResultVector{result},
+                ngraph::ResultVector{resultBroadcast, resultNonZero},
                ngraph::ParameterVector{tensorParam},
                "NonZero-Broadcast");
    }
@@ -50,8 +59,8 @@ protected:
TEST_P(NonZero_Broadcast, CompareWithReference) {
    Run();
}
-// Blocked by #-30913, #-30915
-INSTANTIATE_TEST_CASE_P(DISABLED_DynamicBroadcast, NonZero_Broadcast, ::testing::Combine(

+INSTANTIATE_TEST_CASE_P(DynamicBroadcast, NonZero_Broadcast, ::testing::Combine(
    ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
    ::testing::Values(
        TensorShape{1000},
@@ -15,12 +15,13 @@ using DataDims = ngraph::Shape;
using Parameters = std::tuple<
        DataType,
        DataDims,
-        LayerTestsUtils::TargetDevice
->;
+        LayerTestsUtils::TargetDevice>;

class NonZero_Transpose : public testing::WithParamInterface<Parameters>, public LayerTestsUtils::LayerTestsCommon {
protected:
    void SetUp() override {
        SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);

        const auto& parameters = GetParam();
        const auto& dataType = std::get<0>(GetParam());
        const auto& dataDims = std::get<1>(GetParam());
@@ -41,12 +42,7 @@ protected:
};

TEST_P(NonZero_Transpose, CompareWithReference) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-
-    configuration.emplace(VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480));
-    ConfigurePlugin();
-
-    ASSERT_NO_THROW(LoadNetwork());
    Run();
}

INSTANTIATE_TEST_CASE_P(DynamicTranspose, NonZero_Transpose,