[IE][VPU]: Enable some DSR+Op tests (#2118)
* Introduced a new way to test DSR+Op cases
* Enabled the DSR_Reduce, DSR_VariadicSplit, DSR_TopK, DSR_Scatter, and DSR_Unsqueeze tests
* Other disabled tests remain disabled until a reference function is implemented; added related comments
* Reduced DSR+Op test execution time by shrinking tensor shapes
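For context, the pattern the newly enabled DSR+Op tests follow is: each fixture derives from DSR_TestsCommon, overrides createTestedOp(), and builds the tested operation on top of createInputSubgraphWithDSR(); result creation, DSR mode switching, and the DynamicToStaticShape pass are handled by the shared SetUp() shown in the dsr_tests_common.hpp hunk further down. The sketch below is a hypothetical illustration (DSR_Relu and its Parameters tuple are not part of this commit), included only to show how a test case is assembled under the new scheme.

// Hypothetical example (not from this commit): a DSR+Op test written against the new fixture.
class DSR_Relu : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
protected:
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& parameters = GetParam();
        const auto& dataType = std::get<0>(parameters);    // e.g. ngraph::element::f16
        const auto& dataShapes = std::get<1>(parameters);  // DataShapeWithUpperBound{actual shape, upper-bound shape}
        targetDevice = std::get<2>(parameters);

        // The shared fixture creates the data Parameter, the shape Parameter and the
        // DynamicShapeResolver node; the concrete test only builds the operation under test.
        const auto inputSubgraph = createInputSubgraphWithDSR(dataType, dataShapes);
        return std::make_shared<ngraph::opset3::Relu>(inputSubgraph);
    }
};

INSTANTIATE_TEST_CASE_P(DynamicRelu, DSR_Relu,
    ::testing::Combine(
        ::testing::Values(ngraph::element::f16),
        ::testing::Values(DataShapeWithUpperBound{{789, 4}, {1000, 4}}),
        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));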
parent fecce756a4
commit b0308d91a5
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
#include "vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp"
#include "vpu/ngraph/transformations/dynamic_to_static_shape_broadcast.hpp"
@@ -53,6 +54,19 @@ bool validateStaticShapes(const ngraph::Function& function) {
    return true;
}

bool propagateUpperBoundFromExistingDSR(std::shared_ptr<ngraph::Function>& function) {
    bool function_changed = false;
    for (const auto& op : function->get_ordered_ops()) {
        if (const auto dsr = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(op)) {
            dsr->setMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_UPPER_BOUND_SHAPE);
            dsr->validate_and_infer_types();
            function_changed = true;
        }
    }

    return function_changed;
}

const Transformations& getDefaultTransformations() {
    static const Transformations transformations = {
        {ngraph::opset3::Add::type_info, dynamicToStaticShapeBinaryEltwise},
@@ -120,6 +134,11 @@ DynamicToStaticShape::DynamicToStaticShape(const Transformations& specificTransf

bool DynamicToStaticShape::run_on_function(std::shared_ptr<ngraph::Function> function) {
    bool function_changed = false;

    // Ensure that existing DSRs in function propagate upper-bound shapes, not dynamism.
    // Basically this is possible in test cases, when the function is initially configured with DSR as inputs.
    function_changed |= propagateUpperBoundFromExistingDSR(function);

    for (const auto& operation : function->get_ordered_ops()) {
        if (!isDynamic(*operation)) {
            continue;

@@ -51,8 +51,8 @@ void dynamicToStaticShapeReduce(std::shared_ptr<ngraph::Node> target) {
    if (keep_dims) {
        output_shape = std::make_shared<ngraph::opset3::ScatterElementsUpdate>(
            data_shape,
            ngraph::opset3::Constant::create(ngraph::element::i64, {axes.size()}, axes),
            ngraph::opset3::Constant::create(ngraph::element::i64, {axes.size()}, std::vector<int64_t>(axes.size(), 1)),
            ngraph::opset3::Constant::create(data_shape.get_element_type(), {axes.size()}, axes),
            ngraph::opset3::Constant::create(data_shape.get_element_type(), {axes.size()}, std::vector<int64_t>(axes.size(), 1)),
            ngraph::opset3::Constant::create(ngraph::element::i64, {1}, {0}));
    } else {
        std::vector<int64_t> range(data_rank_value);
@@ -63,7 +63,7 @@ void dynamicToStaticShapeReduce(std::shared_ptr<ngraph::Node> target) {

        output_shape = std::make_shared<ngraph::opset3::Gather>(
            data_shape,
            ngraph::opset3::Constant::create(ngraph::element::i64, {indices.size()}, indices),
            ngraph::opset3::Constant::create(data_shape.get_element_type(), {indices.size()}, indices),
            ngraph::opset3::Constant::create(ngraph::element::i64, {1}, {0}));
    }
    const auto copied = target->clone_with_new_inputs(target->input_values());

@@ -73,7 +73,7 @@ void dynamicToStaticShapeVariadicSplit(std::shared_ptr<ngraph::Node> target) {
        }

        const auto outDSR = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied->output(i), dsrShapeInput);
        outDSR->set_friendly_name(target->get_friendly_name() + "." + std::to_string(target->output(0).get_index()));
        outDSR->set_friendly_name(target->get_friendly_name() + "." + std::to_string(i));
        target->output(i).replace(outDSR);
    }
}

@@ -84,13 +84,14 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
            "Parsing layer {} of type {} failed: if input with index {} (of name {}) has not a producer, it must have Input "
            "data usage, actual: {}", layer->name, layer->type, 1, shape->name(), shape->usage());
    } else {
        VPU_THROW_UNLESS(shape->usage() == DataUsage::Intermediate,
        VPU_THROW_UNLESS(shape->usage() == DataUsage::Intermediate || shape->usage() == DataUsage::Output,
            "Parsing layer {} of type {} failed: if input with index {} (of name {}) has a producer, it must have Intermediate "
            "data usage, actual: {}", layer->name, layer->type, 1, shape->name(), shape->usage());
            "or Output (if already has been associated with other output data) data usage, actual: {}",
            layer->name, layer->type, 1, shape->name(), shape->usage());
    }

    auto shapeDataObject = shape;
    if (dataOutput->usage() == DataUsage::Output) {
    if (dataOutput->usage() == DataUsage::Output && shapeDataObject->usage() != DataUsage::Output) {
        const auto& shapeOutput = model->addOutputData(dataOutput->name() + "@shape", shape->desc());

        bindData(shapeOutput, shape->origData());

@@ -99,7 +99,7 @@ private:

    void initialCheckImpl() const override {
        VPU_THROW_UNLESS(input(0)->desc().type() == output(0)->desc().type(),
            "Stage {} of type {} expects that data types of input with index {} ({}) ",
            "Stage {} of type {} expects that data types of input with index {} ({}) "
            "and output with index {} ({}) are the same, but it is {} and {}",
            name(), type(), 0, input(0)->name(), 0, output(0)->name(), input(0)->desc().type(), output(0)->desc().type());
        assertInputsOutputsTypes(this,

@@ -54,8 +54,7 @@ protected:
        targetDevice = std::get<3>(parameters);

        const auto inputSubgraph0 = createInputSubgraphWithDSR(inDataType, inDataShapes.lhs);
        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(inDataType, inDataShapes.rhs.shape);
        m_parameterVector.push_back(input1);
        const auto input1 = createParameter(inDataType, inDataShapes.rhs.shape);

        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {inputSubgraph0, input1});


@@ -67,8 +67,7 @@ protected:

        const auto inputDataSubgraph = createInputSubgraphWithDSR(inDataType, gatherSetup.inputShapes);

        const auto indicesParam = std::make_shared<ngraph::opset3::Parameter>(idxType, gatherSetup.indexShape.shape);
        m_parameterVector.push_back(indicesParam);
        const auto indicesParam = createParameter(idxType, gatherSetup.indexShape.shape);
        m_indicesInputNames.insert(indicesParam->get_friendly_name());

        const auto axis = ngraph::opset3::Constant::create(ngraph::element::i32, {1}, std::vector<int64_t>{gatherSetup.axis});

@@ -38,14 +38,14 @@ const auto combinations = testing::Combine(
    testing::Values(
        // JIRA: 33925 MatMulTestCase{{{1024}, false, 1, {}, {}, 0}, {{1024, 1000}, false, 0, {}, {}, 1}},
        // JIRA: 33925 MatMulTestCase{{{1024}, true, 1, {1, 0}, {}, 0}, {{1, 1000}, false, 0, {}, {}, 1}},
        MatMulTestCase{{{3, 10, 1024}, {5, 10, 1024}, false},
                       {{1024, 800}, {1024, 1000}, false}},
        MatMulTestCase{{{2, 10, 1024}, {5, 10, 1024}, false},
                       {{1, 1024, 500}, {1, 1024, 1000}, false}},
        MatMulTestCase{{{1, 10, 1024}, {5, 10, 1024}, false},
                       {{1, 800, 1024}, {1, 1000, 1024}, true}},
        MatMulTestCase{{{3, 10, 1024}, {3, 10, 1024}, false},
                       {{2, 1, 1000, 1024}, {5, 1, 1000, 1024}, true}}),
        MatMulTestCase{{{3, 10, 128}, {5, 10, 128}, false},
                       {{128, 80}, {128, 100}, false}},
        MatMulTestCase{{{2, 10, 128}, {5, 10, 128}, false},
                       {{1, 128, 50}, {1, 128, 100}, false}},
        MatMulTestCase{{{1, 10, 128}, {5, 10, 128}, false},
                       {{1, 80, 128}, {1, 100, 128}, true}},
        MatMulTestCase{{{3, 10, 128}, {3, 10, 128}, false},
                       {{2, 1, 100, 128}, {5, 1, 100, 128}, true}}),
    testing::Values(CommonTestUtils::DEVICE_MYRIAD));


@@ -91,9 +91,7 @@ protected:
            NGRAPH_UNREACHABLE("UNKNOWN DYNAMISM MODE for MatMul DSR graph comparison test");
        }

        const auto matMul = std::make_shared<ngraph::opset3::MatMul>(inputA, inputB, matmul_setup.A.transpose, matmul_setup.B.transpose);

        return matMul;
        return std::make_shared<ngraph::opset3::MatMul>(inputA, inputB, matmul_setup.A.transpose, matmul_setup.B.transpose);
    }
};


@@ -62,6 +62,7 @@ TEST_P(DSR_NonMaxSuppression, CompareWithReference) {
    Run();
}

// #-30919
INSTANTIATE_TEST_CASE_P(DISABLED_DynamicNonMaxSupression, DSR_NonMaxSuppression,
    ::testing::Combine(
        ::testing::Values(

@@ -2,63 +2,59 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include "dsr_tests_common.hpp"

namespace {

using DataType = ngraph::element::Type_t;
using DataDims = ngraph::Shape;
using namespace LayerTestsUtils::vpu;

struct ReduceTestCase {
    ngraph::Shape data_shape;
    DataShapeWithUpperBound dataShapes;
    std::vector<int64_t> axes;
    bool keep_dims;
    bool keepDims;
};

const auto arithmetic_combinations = testing::Combine(
const DataShapeWithUpperBound defaultReduceShapes {
    DataShape{24, 81}, DataShape{100, 81}
};

const auto arithmeticCombinations = testing::Combine(
    testing::Values(
        // ReduceMean can be replaced with avg pooling and work incorrectly #-34278
        // ngraph::opset3::ReduceMean::type_info,

        // ReduceProd is not supported by myriad plugin
        // ngraph::opset3::ReduceProd::type_info,
        ngraph::opset3::ReduceSum::type_info,
        ngraph::opset3::ReduceMax::type_info,
        ngraph::opset3::ReduceMean::type_info,
        ngraph::opset3::ReduceMin::type_info,
        ngraph::opset3::ReduceProd::type_info,
        ngraph::opset3::ReduceSum::type_info),
        ngraph::opset3::ReduceMin::type_info),
    testing::Values(
        ngraph::element::f16,
        ngraph::element::f32,
        ngraph::element::i32,
        ngraph::element::i64,
        ngraph::element::u8),
        ngraph::element::f32),
    testing::Values(
        ngraph::element::i32,
        ngraph::element::i64,
        ngraph::element::u8),
        ngraph::element::i32),
    testing::Values(
        // data_shape, axes, keep_dims
        ReduceTestCase{{1, 3, 224, 224}, {2, 3}, true},
        ReduceTestCase{{1, 3, 224, 224}, {2, 3}, false},
        ReduceTestCase{{1, 3, 224, 224}, {0, 1, 2, 3}, true},
        ReduceTestCase{{1, 3, 224, 224}, {1, 3}, false},
        ReduceTestCase{{4}, {0}, true}),
        ReduceTestCase{defaultReduceShapes, {0}, true},
        ReduceTestCase{defaultReduceShapes, {1}, false},
        ReduceTestCase{defaultReduceShapes, {0, 1}, true},
        ReduceTestCase{defaultReduceShapes, {0, 1}, false}),
    testing::Values(CommonTestUtils::DEVICE_MYRIAD));

const auto logical_combinations = testing::Combine(
const auto logicalCombinations = testing::Combine(
    testing::Values(
        ngraph::opset3::ReduceLogicalAnd::type_info,
        ngraph::opset3::ReduceLogicalOr::type_info),
        // ReduceLogicalOr is not supported by Myriad plugin
        // ngraph::opset3::ReduceLogicalOr::type_info,

        ngraph::opset3::ReduceLogicalAnd::type_info),
    testing::Values(ngraph::element::boolean),
    testing::Values(
        ngraph::element::i32,
        ngraph::element::i64,
        ngraph::element::u8),
        ngraph::element::i32),
    testing::Values(
        // data_shape, axes, keep_dims
        ReduceTestCase{{1, 3, 224, 224}, {2, 3}, true},
        ReduceTestCase{{1, 3, 224, 224}, {2, 3}, false},
        ReduceTestCase{{1, 3, 224, 224}, {0, 1, 2, 3}, true},
        ReduceTestCase{{1, 3, 224, 224}, {1, 3}, false},
        ReduceTestCase{{4}, {0}, true}),
        ReduceTestCase{defaultReduceShapes, {0}, true},
        ReduceTestCase{defaultReduceShapes, {1}, false},
        ReduceTestCase{defaultReduceShapes, {0, 1}, true},
        ReduceTestCase{defaultReduceShapes, {0, 1}, false}),
    testing::Values(CommonTestUtils::DEVICE_MYRIAD));


@@ -70,33 +66,34 @@ using Parameters = std::tuple<
    LayerTestsUtils::TargetDevice
>;

class DSR_Reduce : public testing::WithParamInterface<Parameters>,
        virtual public LayerTestsUtils::LayerTestsCommon {
class DSR_Reduce : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
protected:
    void SetUp() override {
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& parameters = GetParam();
        const auto& reduce_type = std::get<0>(parameters);
        const auto& data_type = std::get<1>(parameters);
        const auto& axes_type = std::get<2>(parameters);
        const auto& reduce_setup = std::get<3>(parameters);
        const auto& reduceType = std::get<0>(parameters);
        const auto& dataType = std::get<1>(parameters);
        const auto& axesType = std::get<2>(parameters);
        const auto& reduceSetup = std::get<3>(parameters);
        targetDevice = std::get<4>(parameters);

        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, reduce_setup.data_shape);
        const auto axes = ngraph::opset3::Constant::create(axes_type, {reduce_setup.axes.size()}, reduce_setup.axes);
        const auto inputSubgraph = createInputSubgraphWithDSR(dataType, reduceSetup.dataShapes);
        const auto axes = ngraph::opset3::Constant::create(axesType, {reduceSetup.axes.size()}, reduceSetup.axes);

        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{reduce_setup.data_shape.size()});
        const auto reduce = ngraph::helpers::getNodeSharedPtr(reduceType, {inputSubgraph, axes});

        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
        const auto node = ngraph::helpers::getNodeSharedPtr(reduce_type, {dsr, axes});
        if (auto arithmetic_reduce = std::dynamic_pointer_cast<ngraph::op::util::ArithmeticReductionKeepDims>(reduce))
            arithmetic_reduce->set_keep_dims(reduceSetup.keepDims);
        else if (auto logical_reduce = std::dynamic_pointer_cast<ngraph::op::util::LogicalReductionKeepDims>(reduce))
            logical_reduce->set_keep_dims(reduceSetup.keepDims);
        reduce->validate_and_infer_types();

        if (auto arithmetic_reduce = std::dynamic_pointer_cast<ngraph::op::util::ArithmeticReductionKeepDims>(node))
            arithmetic_reduce->set_keep_dims(reduce_setup.keep_dims);
        else if (auto logical_reduce = std::dynamic_pointer_cast<ngraph::op::util::LogicalReductionKeepDims>(node))
            logical_reduce->set_keep_dims(reduce_setup.keep_dims);
        node->validate_and_infer_types();
        const auto result = std::make_shared<ngraph::opset3::Result>(node);
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
            ngraph::ParameterVector{data, dims}, "DSR-Reduce");
        // CNNNetworkNGraphImpl handles only I64, I32 and FP32 precisions and sets FP32 as default otherwise.
        // Set I32 explicitly.
        if (dataType == ngraph::element::boolean) {
            outPrc = InferenceEngine::Precision::I32;
        }

        return reduce;
    }
};

@@ -104,7 +101,7 @@ TEST_P(DSR_Reduce, CompareWithReference) {
    Run();
}

INSTANTIATE_TEST_CASE_P(DISABLED_DynamicArithmeticReduce, DSR_Reduce, arithmetic_combinations);
INSTANTIATE_TEST_CASE_P(DISABLED_DynamicLogicalReduce, DSR_Reduce, logical_combinations);
INSTANTIATE_TEST_CASE_P(DynamicArithmeticReduce, DSR_Reduce, arithmeticCombinations);
INSTANTIATE_TEST_CASE_P(DynamicLogicalReduce, DSR_Reduce, logicalCombinations);

} // namespace

@@ -147,6 +147,7 @@ TEST_P(DSR_ROIAlign, CompareWithReference) {
    Run();
}

// #-30909
INSTANTIATE_TEST_CASE_P(DISABLED_DynamicROIAlign, DSR_ROIAlign,
    ::testing::Combine(
        ::testing::Values(

@@ -2,18 +2,15 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include "dsr_tests_common.hpp"

namespace {

using DataType = ngraph::element::Type_t;

using namespace LayerTestsUtils::vpu;

struct ScatterTestCase {
    ngraph::NodeTypeInfo scatter_type_info;
    ngraph::Shape data_shape, indices_shape, updates_shape;
    ngraph::NodeTypeInfo scatterTypeInfo;
    DataShapeWithUpperBound dataShapes, indicesShape, updatesShape;
    int64_t axis;
};

@@ -24,30 +21,22 @@ using Parameters = std::tuple<
    LayerTestsUtils::TargetDevice
>;

class DSR_Scatter : public testing::WithParamInterface<Parameters>,
        virtual public LayerTestsUtils::LayerTestsCommon {
class DSR_Scatter : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
protected:
    void SetUp() override {
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& parameters = GetParam();
        const auto& numeric_type = std::get<0>(parameters);
        const auto& integer_type = std::get<1>(parameters);
        const auto& scatter_setup = std::get<2>(parameters);
        const auto& numericType = std::get<0>(parameters);
        const auto& integerType = std::get<1>(parameters);
        const auto& scatterSetup = std::get<2>(parameters);
        targetDevice = std::get<3>(parameters);

        const auto data = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.data_shape);
        const auto indices = std::make_shared<ngraph::opset3::Parameter>(integer_type, scatter_setup.indices_shape);
        const auto updates = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.updates_shape);
        const auto axis = std::make_shared<ngraph::opset3::Constant>(integer_type, ngraph::Shape{1}, std::vector<int64_t>{scatter_setup.axis});
        const auto inputSubgraph = createInputSubgraphWithDSR(numericType, scatterSetup.dataShapes);
        const auto indicesSubgraph = createInputSubgraphWithDSR(integerType, scatterSetup.indicesShape);
        const auto updatesSubgraph = createInputSubgraphWithDSR(numericType, scatterSetup.updatesShape);

        const auto axis = std::make_shared<ngraph::opset3::Constant>(integerType, ngraph::Shape{1}, std::vector<int64_t>{scatterSetup.axis});

        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{scatter_setup.data_shape.size()});
        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);

        const auto node = ngraph::helpers::getNodeSharedPtr(scatter_setup.scatter_type_info, {dsr, indices, updates, axis});

        const auto result = std::make_shared<ngraph::opset3::Result>(node);
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
            ngraph::ParameterVector{data, indices, updates, dims}, scatter_setup.scatter_type_info.name);
        return ngraph::helpers::getNodeSharedPtr(scatterSetup.scatterTypeInfo, {inputSubgraph, indicesSubgraph, updatesSubgraph, axis});
    }
};

@@ -55,20 +44,19 @@ TEST_P(DSR_Scatter, CompareWithReference) {
    Run();
}

INSTANTIATE_TEST_CASE_P(DISABLED_DynamicScatter, DSR_Scatter,
INSTANTIATE_TEST_CASE_P(DynamicScatter, DSR_Scatter,
    ::testing::Combine(
        testing::Values(
            ngraph::element::f16,
            ngraph::element::f32,
            ngraph::element::i32,
            ngraph::element::i64,
            ngraph::element::u8),
            ngraph::element::f16),
        testing::Values(
            ngraph::element::i32,
            ngraph::element::i64,
            ngraph::element::u8),
            ngraph::element::i32),
        testing::Values(
            ScatterTestCase{ngraph::opset3::ScatterUpdate::type_info, {1000, 256, 10, 15}, {125, 20}, {1000, 125, 20, 10, 15}, 1}),
            ScatterTestCase{
                ngraph::opset3::ScatterUpdate::type_info,
                {{84, 256, 7, 7}, {100, 256, 7, 7}},
                {{84}, {100}},
                {{84, 256, 7, 7}, {100, 256, 7, 7}},
                0}),
        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));

} // namespace

@@ -41,9 +41,7 @@ protected:

        const auto axes = std::make_shared<ngraph::opset3::Constant>(
            ngraph::element::i64, ngraph::Shape{squeezeAxes.size()}, squeezeAxes);
        const auto squeeze = std::make_shared<ngraph::opset3::Squeeze>(inputSubgraph, axes);

        return squeeze;
        return std::make_shared<ngraph::opset3::Squeeze>(inputSubgraph, axes);
    }
};

@@ -53,7 +51,7 @@ TEST_P(DSR_Squeeze, CompareWithReference) {

INSTANTIATE_TEST_CASE_P(DynamicSqueeze, DSR_Squeeze,
    ::testing::Combine(
        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
        ::testing::Values(ngraph::element::f16, ngraph::element::i32),
        ::testing::Values(
            // input_shape, squeeze_axis
            SqueezeTestCase{DataShapeWithUpperBound{{1, 1, 1000}, {1, 1, 1500}}, AxisVector{-2}},

@@ -50,22 +50,13 @@ protected:
        m_parameterVector.push_back(inDataShapeParam);

        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
            inDataParam, inDataShapeParam);
            inDataParam, inDataShapeParam, ngraph::vpu::op::DynamicShapeResolverMode::INFER_DYNAMIC_SHAPE);

        return dsr;
    }

    virtual std::shared_ptr<ngraph::Node> createTestedOp() = 0;

    void switchDSRMode(const ngraph::vpu::op::DynamicShapeResolverMode& mode) {
        for (const auto& op : function->get_ordered_ops()) {
            if (const auto dsr = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(op)) {
                dsr->setMode(mode);
            }
        }
        function->validate_nodes_and_infer_types();
    }

    void SetUp() override {
        SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
        configuration[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
@@ -74,23 +65,15 @@ protected:
        }

        const auto testedOp = createTestedOp();
        const auto result = std::make_shared<ngraph::opset3::Result>(testedOp);
        ngraph::ResultVector results{};
        for (const auto& output : testedOp->outputs()) {
            results.emplace_back(std::make_shared<ngraph::opset3::Result>(output));
        }

        function = std::make_shared<ngraph::Function>(
            ngraph::NodeVector{result},
            results,
            m_parameterVector,
            "DSR-" + std::string(testedOp->get_type_name()));

        // Get the output shape as if it was in a graph with dynamism
        switchDSRMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_DYNAMIC_SHAPE);
        const auto outputDynamicShape = testedOp->get_output_partial_shape(0);

        // Switch DSR mode back to INFER_UPPER_BOUND_SHAPE but set dynamic output shape for tested op.
        // It is needed to trigger appropriate DTS transformation.
        switchDSRMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_UPPER_BOUND_SHAPE);
        testedOp->set_output_type(0, testedOp->get_input_element_type(0), outputDynamicShape);

        ::vpu::DynamicToStaticShape().run_on_function(function);
    }

    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
@@ -109,12 +92,6 @@ protected:

        return blob;
    }

    void Validate() override {
        switchDSRMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_DYNAMIC_SHAPE);
        LayerTestsCommon::Validate();
        switchDSRMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_UPPER_BOUND_SHAPE);
    }
};

} // namespace vpu

@@ -2,37 +2,26 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include "dsr_tests_common.hpp"

namespace {

using DataType = ngraph::element::Type_t;
using DataDims = ngraph::Shape;
using namespace LayerTestsUtils::vpu;

struct TopKTestCase {
    ngraph::Shape data_shape;
    int64_t k, axis, first_split_point, second_split_point;
    DataShapeWithUpperBound dataShapes;
    int64_t k;
    int64_t axis;
};

const auto combinations = testing::Combine(
    testing::Values(
        ngraph::element::f16,
        ngraph::element::f32,
        ngraph::element::i32,
        ngraph::element::i64,
        ngraph::element::u8),
        ngraph::element::f16),
    testing::Values(
        ngraph::element::i32,
        ngraph::element::i64),
        ngraph::element::i32),
    testing::Values(
        TopKTestCase{{6}, 5, 0, 0, 0},
        TopKTestCase{{6, 12, 10, 24}, 5, 0, 0, 1},
        TopKTestCase{{6, 12}, 10, 1, 1, 2},
        TopKTestCase{{6, 12, 10, 24}, 7, 3, 3, 4},
        TopKTestCase{{6, 12, 10, 24}, 20, -1, 3, 4},
        TopKTestCase{{6, 12, 10, 24}, 3, -4, 0, 1}),
        TopKTestCase{{{12345}, {80000}}, 75, 0},
        TopKTestCase{{{1234}, {4663}}, 70, 0}),
    testing::Values(CommonTestUtils::DEVICE_MYRIAD));


@@ -43,33 +32,19 @@ using Parameters = std::tuple<
    LayerTestsUtils::TargetDevice
>;

class DSR_TopK_Const : public testing::WithParamInterface<Parameters>,
        virtual public LayerTestsUtils::LayerTestsCommon {
class DSR_TopK_Const : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
protected:
    void SetUp() override {
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& parameters = GetParam();
        const auto& data_type = std::get<0>(parameters);
        const auto& idx_type = std::get<1>(parameters);
        const auto& topk_setup = std::get<2>(parameters);
        const auto& dataType = std::get<0>(parameters);
        const auto& idxType = std::get<1>(parameters);
        const auto& topkSetup = std::get<2>(parameters);
        targetDevice = std::get<3>(parameters);

        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, topk_setup.data_shape);
        const auto k = ngraph::opset3::Constant::create(idx_type, {}, std::vector<int64_t>{topk_setup.k});
        const auto inputSubgraph = createInputSubgraphWithDSR(dataType, topkSetup.dataShapes);
        const auto k = ngraph::opset3::Constant::create(idxType, {}, std::vector<int64_t>{topkSetup.k});

        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{topk_setup.data_shape.size()});

        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
        const auto node = std::make_shared<ngraph::opset3::TopK>(dsr, k, topk_setup.axis, "max", "value");

        // tests are capable to compare functions with one result only, but TopK has 2 of them and they are of different types
        ngraph::OutputVector converted;
        for (const auto& result : {node->output(0), node->output(1)}) {
            converted.push_back(std::make_shared<ngraph::opset3::Convert>(result, ngraph::element::f32));
        }
        const auto tests_wa = std::make_shared<ngraph::opset3::Concat>(converted, topk_setup.axis);
        const auto result = std::make_shared<ngraph::opset3::Result>(tests_wa);
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
            ngraph::ParameterVector{data, dims}, "DSR-TopKConst");
        return std::make_shared<ngraph::opset3::TopK>(inputSubgraph, k, topkSetup.axis, "max", "value");
    }
};

@@ -77,40 +52,29 @@ TEST_P(DSR_TopK_Const, CompareWithReference) {
    Run();
}

INSTANTIATE_TEST_CASE_P(DISABLED_DynamicTopKConst, DSR_TopK_Const, combinations);
INSTANTIATE_TEST_CASE_P(DynamicTopKConst, DSR_TopK_Const, combinations);

class DSR_TopK : public testing::WithParamInterface<Parameters>,
        virtual public LayerTestsUtils::LayerTestsCommon {
class DSR_TopK : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
protected:
    void SetUp() override {
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& parameters = GetParam();
        const auto& data_type = std::get<0>(parameters);
        const auto& idx_type = std::get<1>(parameters);
        const auto& topk_setup = std::get<2>(parameters);
        const auto& dataType = std::get<0>(parameters);
        const auto& idxType = std::get<1>(parameters);
        const auto& topkSetup = std::get<2>(parameters);
        targetDevice = std::get<3>(parameters);

        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, topk_setup.data_shape);
        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{topk_setup.data_shape.size()});
        const auto inputSubgraph = createInputSubgraphWithDSR(dataType, topkSetup.dataShapes);

        const auto gather = std::make_shared<ngraph::opset3::Gather>(dims,
            ngraph::opset3::Constant::create(ngraph::element::i32, {1}, {topk_setup.axis}),
            ngraph::opset3::Constant::create(ngraph::element::i32, {1}, {0}));
        const auto upper_bound = ngraph::opset3::Constant::create(dims->get_element_type(), {1}, {100});
        const auto gather = std::make_shared<ngraph::opset3::Gather>(
            inputSubgraph->input_value(1),
            ngraph::opset3::Constant::create(ngraph::element::i32, {1}, {topkSetup.axis}),
            ngraph::opset3::Constant::create(ngraph::element::i32, {1}, {0}));
        const auto upper_bound = ngraph::opset3::Constant::create(inputSubgraph->get_input_element_type(1), {1}, {topkSetup.k});
        const auto concat = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{upper_bound, gather}, 0);
        const auto k = std::make_shared<ngraph::opset3::ReduceMin>(concat, ngraph::opset3::Constant::create(ngraph::element::i32, {1}, {0}), false);
        const auto k = std::make_shared<ngraph::opset3::ReduceMin>(
            concat, ngraph::opset3::Constant::create(ngraph::element::i32, {1}, {0}), false);

        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
        const auto node = std::make_shared<ngraph::opset3::TopK>(dsr, k, topk_setup.axis, "max", "value");

        // tests are capable to compare functions with one result only, but TopK has 2 of them and they are of different types
        ngraph::OutputVector converted;
        for (const auto& result : {node->output(0), node->output(1)}) {
            converted.push_back(std::make_shared<ngraph::opset3::Convert>(result, ngraph::element::f32));
        }
        const auto tests_wa = std::make_shared<ngraph::opset3::Concat>(converted, topk_setup.axis);
        const auto result = std::make_shared<ngraph::opset3::Result>(tests_wa);
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
            ngraph::ParameterVector{data, dims}, "DSR-TopK");
        return std::make_shared<ngraph::opset3::TopK>(inputSubgraph, k, topkSetup.axis, "max", "value");
    }
};

@@ -118,6 +82,6 @@ TEST_P(DSR_TopK, CompareWithReference) {
    Run();
}

INSTANTIATE_TEST_CASE_P(DISABLED_DynamicTopKConst, DSR_TopK, combinations);
INSTANTIATE_TEST_CASE_P(DynamicTopKConst, DSR_TopK, combinations);

} // namespace

@@ -2,20 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>

#include "dsr_tests_common.hpp"

namespace {

using DataType = ngraph::element::Type_t;
using DataDims = ngraph::Shape;
using axis_vec = std::vector<int64_t>;
using namespace LayerTestsUtils::vpu;

using AxisVector = std::vector<int64_t>;

struct UnsqueezeTestCase {
    DataDims input_shape;
    axis_vec unsqueeze_axes;
    DataShapeWithUpperBound inputShapes;
    AxisVector unsqueezeAxes;
};

using Parameters = std::tuple<
@@ -24,28 +21,22 @@ using Parameters = std::tuple<
    LayerTestsUtils::TargetDevice
>;

class DSR_Unsqueeze : public testing::WithParamInterface<Parameters>, virtual public LayerTestsUtils::LayerTestsCommon {
class DSR_Unsqueeze : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
protected:
    void SetUp() override {
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& parameters = GetParam();
        const auto& data_type = std::get<0>(parameters);
        const auto& squeeze_test_case = std::get<1>(parameters);
        const auto& dataType = std::get<0>(parameters);
        const auto& squeezeTestCase = std::get<1>(parameters);

        const auto& input_shape = squeeze_test_case.input_shape;
        const auto& unsqueeze_axes = squeeze_test_case.unsqueeze_axes;
        const auto& inputShapes = squeezeTestCase.inputShapes;
        const auto& unsqueezeAxes = squeezeTestCase.unsqueezeAxes;

        targetDevice = std::get<2>(GetParam());

        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, input_shape);
        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_shape.size()});
        const auto inputSubgraph = createInputSubgraphWithDSR(dataType, inputShapes);
        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{unsqueezeAxes.size()}, unsqueezeAxes);

        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);

        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{unsqueeze_axes.size()}, unsqueeze_axes);
        const auto node = std::make_shared<ngraph::opset3::Unsqueeze>(dsr, axes);

        const auto result = std::make_shared<ngraph::opset3::Result>(node);
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims}, "DSR-Unsqueeze");
        return std::make_shared<ngraph::opset3::Unsqueeze>(inputSubgraph, axes);
    }
};

@@ -53,15 +44,15 @@ TEST_P(DSR_Unsqueeze, CompareWithReference) {
    Run();
}

INSTANTIATE_TEST_CASE_P(DISABLED_DynamicUnsqueeze, DSR_Unsqueeze,
INSTANTIATE_TEST_CASE_P(DynamicUnsqueeze, DSR_Unsqueeze,
    ::testing::Combine(
        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
        ::testing::Values(ngraph::element::f16, ngraph::element::i32),
        ::testing::Values(
            // input_shape, unsqueeze_axis
            UnsqueezeTestCase{DataDims{10, 100, 1000}, axis_vec{-1, -3}},
            UnsqueezeTestCase{DataDims{10, 100, 1000}, axis_vec{0}},
            UnsqueezeTestCase{DataDims{10}, axis_vec{1}},
            UnsqueezeTestCase{DataDims{10}, axis_vec{0}}),
            // inputShapes, unsqueezeAxes
            UnsqueezeTestCase{DataShapeWithUpperBound{{789, 4}, {1000, 4}}, AxisVector{-1, -3}},
            UnsqueezeTestCase{DataShapeWithUpperBound{{789, 4}, {1000, 4}}, AxisVector{0}},
            UnsqueezeTestCase{DataShapeWithUpperBound{{789, 4}, {1000, 4}}, AxisVector{1}},
            UnsqueezeTestCase{DataShapeWithUpperBound{{789, 4}, {1000, 4}}, AxisVector{2}}),
        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));

} // namespace

@@ -2,39 +2,28 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include "dsr_tests_common.hpp"

namespace {

using DataType = ngraph::element::Type_t;
using DataDims = ngraph::Shape;

using namespace LayerTestsUtils::vpu;

struct VariadicSplitTestCase {
    ngraph::Shape data_shape;
    std::vector<int64_t> split_lengths;
    int64_t axis, first_split_point, second_split_point;
    DataShapeWithUpperBound dataShapes;
    std::vector<int64_t> splitLengths;
    int64_t axis;
};

const auto combinations = testing::Combine(
    testing::Values(
        ngraph::element::f16,
        ngraph::element::f32,
        ngraph::element::i32,
        ngraph::element::i64,
        ngraph::element::u8),
        ngraph::element::f16),
    testing::Values(
        ngraph::element::i32,
        ngraph::element::i64),
        ngraph::element::i32),
    testing::Values(
        VariadicSplitTestCase{{6}, {2, 1, 2, 1}, 0, 0, 0},
        VariadicSplitTestCase{{6, 12, 10, 24}, {1, 1, 3, 1}, 0, 0, 1},
        VariadicSplitTestCase{{6, 12}, {7, 2, 1, 2}, 1, 1, 2},
        VariadicSplitTestCase{{6, 12, 10, 24}, {10, 14}, 3, 3, 4},
        VariadicSplitTestCase{{6, 12, 10, 24}, {14, 10}, -1, 3, 4},
        VariadicSplitTestCase{{6, 12, 10, 24}, {6}, -4, 0, 1}),
        VariadicSplitTestCase{{{6, 12, 10}, {6, 12, 15}}, {1, 1, 3, 1}, 0},
        VariadicSplitTestCase{{{6, 12}, {10, 12}}, {7, 2, 1, 2}, 1},
        VariadicSplitTestCase{{{6, 12, 10, 24}, {6, 12, 10, 50}}, {4, 6}, 2},
        VariadicSplitTestCase{{{6, 12, 10, 24}, {6, 12, 10, 50}}, {4, 6}, -2}),
    testing::Values(CommonTestUtils::DEVICE_MYRIAD));


@@ -45,30 +34,22 @@ using Parameters = std::tuple<
    LayerTestsUtils::TargetDevice
>;

class DSR_VariadicSplit : public testing::WithParamInterface<Parameters>,
        virtual public LayerTestsUtils::LayerTestsCommon {
class DSR_VariadicSplit : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
protected:
    void SetUp() override {
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& parameters = GetParam();
        const auto& data_type = std::get<0>(parameters);
        const auto& idx_type = std::get<1>(parameters);
        const auto& variadic_split_setup = std::get<2>(parameters);
        const auto& dataType = std::get<0>(parameters);
        const auto& idxType = std::get<1>(parameters);
        const auto& variadicSplitSetup = std::get<2>(parameters);
        targetDevice = std::get<3>(parameters);

        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, variadic_split_setup.data_shape);
        const auto axis = ngraph::opset3::Constant::create(idx_type, {}, std::vector<int64_t>{variadic_split_setup.axis});
        const auto split_lengths = ngraph::opset3::Constant::create(idx_type,
            {variadic_split_setup.split_lengths.size()}, std::vector<int64_t>{variadic_split_setup.split_lengths});
        const auto inputSubgraph = createInputSubgraphWithDSR(dataType, variadicSplitSetup.dataShapes);

        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{variadic_split_setup.data_shape.size()});
        const auto axis = ngraph::opset3::Constant::create(idxType, {}, std::vector<int64_t>{variadicSplitSetup.axis});
        const auto splitLengths = ngraph::opset3::Constant::create(idxType,
            {variadicSplitSetup.splitLengths.size()}, std::vector<int64_t>{variadicSplitSetup.splitLengths});

        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
        const auto node = std::make_shared<ngraph::opset3::VariadicSplit>(dsr, axis, split_lengths);

        const auto tests_wa = std::make_shared<ngraph::opset3::Concat>(node->outputs(), variadic_split_setup.axis);
        const auto result = std::make_shared<ngraph::opset3::Result>(tests_wa);
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
            ngraph::ParameterVector{data, dims}, "DSR-VariadicSplit");
        return std::make_shared<ngraph::opset3::VariadicSplit>(inputSubgraph, axis, splitLengths);
    }
};

@@ -76,6 +57,6 @@ TEST_P(DSR_VariadicSplit, CompareWithReference) {
    Run();
}

INSTANTIATE_TEST_CASE_P(DISABLED_DynamicGatherData, DSR_VariadicSplit, combinations);
INSTANTIATE_TEST_CASE_P(DynamicGatherData, DSR_VariadicSplit, combinations);

} // namespace

@@ -112,6 +112,23 @@ TEST_F(DSRParsingTests, DSRParserDoesntAssertOnCorrectIO) {
        {inputStage->output(0), inputStage->output(1)}, _testModel.getOutputs()));
}

TEST_F(DSRParsingTests, DSRParserDoesntAssertOnTwoOutputsWithSameShapeData) {
    _testModel.createInputs({_dataDesc});
    _testModel.createOutputs({_dataDesc, _dataDesc});

    const auto& inputStage = _testModel.addStage(
        {InputInfo::fromNetwork(0)},
        {OutputInfo::intermediate(_dataDesc), OutputInfo::intermediate(_dataDesc), OutputInfo::intermediate(_correstShapeDesc)});

    const auto& dsrLayer1 = createDSRLayer();
    const auto& dsrLayer2 = createDSRLayer();

    ASSERT_NO_THROW(frontEnd->parseDSR(_testModel.getBaseModel(), dsrLayer1,
        {inputStage->output(0), inputStage->output(2)}, {_testModel.getOutputs()[0]}));
    ASSERT_NO_THROW(frontEnd->parseDSR(_testModel.getBaseModel(), dsrLayer2,
        {inputStage->output(1), inputStage->output(2)}, {_testModel.getOutputs()[1]}));
}

TEST_F(DSRParsingTests, DSRParserPreservesConnectionsOnOutputDSR) {
    _testModel.createInputs({_dataDesc});
    _testModel.createOutputs({_dataDesc});