[VPU] Enable DSR_MatMul tests (#1129)

* [VPU] Remove hardcoded shape type from MatMul dts

* [VPU] Forbid first GEMM input to be dynamic and transposed

* [VPU] Update DSR_MatMul tests to use DSR_TestsCommon base class
This commit is contained in:
Andrew Bakalin 2020-07-03 12:30:46 +03:00 committed by GitHub
parent a17366f621
commit 7f37714c02
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 73 additions and 80 deletions

View File

@ -15,10 +15,11 @@
namespace vpu {
void get_normalized_shape(ngraph::Output<ngraph::Node> & shape, size_t actual_rank_value, size_t max_rank_value, bool transpose) {
void get_normalized_shape(ngraph::Output<ngraph::Node>& shape, size_t actual_rank_value, size_t max_rank_value, bool transpose,
const ngraph::element::Type& elementType) {
if (const unsigned rank_diff = max_rank_value - actual_rank_value) {
ngraph::OutputVector extended_shape_parts =
{ngraph::opset3::Constant::create(ngraph::element::i64, {rank_diff}, std::vector<int64_t>(rank_diff, 1)), shape};
{ngraph::opset3::Constant::create(elementType, {rank_diff}, std::vector<int64_t>(rank_diff, 1)), shape};
shape = std::make_shared<ngraph::opset3::Concat>(extended_shape_parts, 0);
}
if (transpose) {
@ -36,19 +37,30 @@ void dynamicToStaticShapeMatMul(std::shared_ptr<ngraph::Node> target) {
VPU_THROW_UNLESS(matmul, "dynamicToStaticShapeMatMul transformation is not applicable for {}, it should be {} instead",
target, ngraph::opset3::MatMul::type_info);
auto shapeToConstant = [&target](const ngraph::Output<ngraph::Node> & output) -> std::shared_ptr<ngraph::opset3::Constant> {
auto shapeToConstant = [&target](const ngraph::Output<ngraph::Node>& output,
const ngraph::element::Type& elementType) -> std::shared_ptr<ngraph::opset3::Constant> {
VPU_THROW_UNLESS(output.get_partial_shape().is_static(),
"DynamicToStaticShape transformation for {} of type {} expects static shape on inputs without DSR",
target->get_friendly_name(), target->get_type_info());
return ngraph::opset3::Constant::create(ngraph::element::i64, {output.get_shape().size()}, output.get_shape());
return ngraph::opset3::Constant::create(elementType, {output.get_shape().size()}, output.get_shape());
};
const auto a_input_DSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(target->input_value(0).get_node_shared_ptr());
const auto b_input_DSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(target->input_value(1).get_node_shared_ptr());
if (a_input_DSR && b_input_DSR) {
VPU_THROW_UNLESS(a_input_DSR->get_input_element_type(1) == b_input_DSR->get_input_element_type(1),
"DynamicToStaticShape transformation for {} of type {} expects equal shapes data types, actual {} vs {}",
matmul->get_friendly_name(), matmul->get_type_info(),
a_input_DSR->get_input_element_type(1), b_input_DSR->get_input_element_type(1));
}
VPU_THROW_UNLESS(a_input_DSR || b_input_DSR, "DynamicToStaticShape transformation for {} of type {} expects at least one DSR as input",
target->get_friendly_name(), target->get_type_info());
ngraph::Output<ngraph::Node> a_input_shape = a_input_DSR ? a_input_DSR->input_value(1) : shapeToConstant(target->input_value(0));
ngraph::Output<ngraph::Node> b_input_shape = b_input_DSR ? b_input_DSR->input_value(1) : shapeToConstant(target->input_value(1));
const auto shapeElementType = a_input_DSR ? a_input_DSR->get_input_element_type(1) : b_input_DSR->get_input_element_type(1);
ngraph::Output<ngraph::Node> a_input_shape = a_input_DSR ? a_input_DSR->input_value(1) : shapeToConstant(target->input_value(0), shapeElementType);
ngraph::Output<ngraph::Node> b_input_shape = b_input_DSR ? b_input_DSR->input_value(1) : shapeToConstant(target->input_value(1), shapeElementType);
const auto& a_rank = a_input_shape.get_partial_shape();
const auto& b_rank = b_input_shape.get_partial_shape();
@ -57,8 +69,8 @@ void dynamicToStaticShapeMatMul(std::shared_ptr<ngraph::Node> target) {
const auto b_rank_value = b_rank[0].get_length();
const auto max_rank_value = std::max(ngraph::Dimension::value_type(2), std::max(a_rank_value, b_rank_value));
get_normalized_shape(a_input_shape, a_rank_value, max_rank_value, matmul->get_transpose_a());
get_normalized_shape(b_input_shape, b_rank_value, max_rank_value, matmul->get_transpose_b());
get_normalized_shape(a_input_shape, a_rank_value, max_rank_value, matmul->get_transpose_a(), shapeElementType);
get_normalized_shape(b_input_shape, b_rank_value, max_rank_value, matmul->get_transpose_b(), shapeElementType);
ngraph::OutputVector output_dims;
if (max_rank_value > 2) {

View File

@ -49,6 +49,11 @@ void PassImpl::run(const Model& model) {
auto inputB = stage->input(1);
auto output = stage->output(0);
VPU_THROW_UNLESS(inputA->parentDataToShapeEdge() == nullptr,
"Processing layer {} with type {} failed: first input ({} with usage {}) which is dynamic "
"doesn't support transpose parameter",
stage->name(), stage->type(), inputA->name(), inputA->usage());
const auto inputDimsA = inputA->desc().dims();
const auto K = inputDimsA[Dim::H];

View File

@ -2,12 +2,16 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "dsr_tests_common.hpp"
#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
namespace {
using namespace LayerTestsUtils::vpu;
enum DYNAMISM_MODE {
BOTH_INPUTS_DYNAMIC,
A_INPUT_DYNAMIC,
@ -15,12 +19,8 @@ enum DYNAMISM_MODE {
};
// Describes one MatMul operand for the dynamic-shape (DSR) tests:
//  - realShape:       the actual shape fed at inference time;
//  - upperBoundShape: the static upper-bound shape the network is built with;
//  - transpose:       whether MatMul transposes this operand.
// NOTE(review): the scraped diff interleaved the removed fields (shape,
// rank_diff, gather index vectors, channel_idx) with the added ones; this is
// the post-commit form of the struct, where shape-normalization data is no
// longer precomputed by the test.
struct MatMul_input_setup {
    ngraph::Shape realShape, upperBoundShape;
    bool transpose;
};
struct MatMulTestCase {
@ -34,17 +34,18 @@ const auto combinations = testing::Combine(
DYNAMISM_MODE::B_INPUT_DYNAMIC),
testing::Values(
ngraph::element::f16,
ngraph::element::f32,
ngraph::element::i32,
ngraph::element::i64,
ngraph::element::u8),
ngraph::element::f32),
testing::Values(
// JIRA: 33925 MatMulTestCase{{{1024}, false, 1, {}, {}, 0}, {{1024, 1000}, false, 0, {}, {}, 1}},
// JIRA: 33925 MatMulTestCase{{{1024}, true, 1, {1, 0}, {}, 0}, {{1, 1000}, false, 0, {}, {}, 1}},
MatMulTestCase{{{5, 10, 1024}, false, 0, {}, {0}, 1}, {{1024, 1000}, false, 1, {}, {0}, 2}},
MatMulTestCase{{{5, 10, 1024}, false, 0, {}, {0}, 1}, {{1, 1024, 1000}, false, 0, {}, {0}, 2}},
MatMulTestCase{{{5, 1024, 10}, true, 0, {0, 2, 1}, {0}, 1}, {{1, 1000, 1024}, true, 0, {0, 2, 1}, {0}, 2}},
MatMulTestCase{{{3, 1024, 10}, true, 1, {0, 1, 3, 2}, {0, 1}, 2}, {{5, 1, 1000, 1024}, true, 0, {0, 1, 3, 2}, {0, 1}, 3}}),
MatMulTestCase{{{3, 10, 1024}, {5, 10, 1024}, false},
{{1024, 800}, {1024, 1000}, false}},
MatMulTestCase{{{2, 10, 1024}, {5, 10, 1024}, false},
{{1, 1024, 500}, {1, 1024, 1000}, false}},
MatMulTestCase{{{1, 10, 1024}, {5, 10, 1024}, false},
{{1, 800, 1024}, {1, 1000, 1024}, true}},
MatMulTestCase{{{3, 10, 1024}, {3, 10, 1024}, false},
{{2, 1, 1000, 1024}, {5, 1, 1000, 1024}, true}}),
testing::Values(CommonTestUtils::DEVICE_MYRIAD));
@ -59,78 +60,46 @@ using Parameters = std::tuple<
>;
// Value-parameterized test fixture for MatMul with dynamic shapes.
// For each DYNAMISM_MODE it wires the tested MatMul so that every dynamic
// operand goes through a DynamicShapeResolver input subgraph (built with the
// upper-bound shape) while every static operand stays a plain Parameter with
// its real shape. Graph construction and execution are inherited from
// DSR_TestsCommon.
// NOTE(review): the scrape interleaved the deleted setting_up_input_dynamism /
// SetUp implementation with the added createTestedOp; this is the post-commit
// version of the class.
class DSR_MatMul : public testing::WithParamInterface<Parameters>,
                   public DSR_TestsCommon {
protected:
    std::shared_ptr<ngraph::Node> createTestedOp() override {
        const auto& params = GetParam();
        const auto& mode = std::get<0>(params);
        const auto& data_type = std::get<1>(params);
        const auto& matmul_setup = std::get<2>(params);
        targetDevice = std::get<3>(params);

        std::shared_ptr<ngraph::Node> inputA, inputB;

        switch (mode) {
            case DYNAMISM_MODE::BOTH_INPUTS_DYNAMIC: {
                inputA = createInputSubgraphWithDSR(data_type, DataShapeWithUpperBound{matmul_setup.A.realShape, matmul_setup.A.upperBoundShape});
                inputB = createInputSubgraphWithDSR(data_type, DataShapeWithUpperBound{matmul_setup.B.realShape, matmul_setup.B.upperBoundShape});
                break;
            }
            case DYNAMISM_MODE::A_INPUT_DYNAMIC: {
                inputA = createInputSubgraphWithDSR(data_type, DataShapeWithUpperBound{matmul_setup.A.realShape, matmul_setup.A.upperBoundShape});
                inputB = createParameter(data_type, matmul_setup.B.realShape);
                break;
            }
            case DYNAMISM_MODE::B_INPUT_DYNAMIC: {
                inputA = createParameter(data_type, matmul_setup.A.realShape);
                inputB = createInputSubgraphWithDSR(data_type, DataShapeWithUpperBound{matmul_setup.B.realShape, matmul_setup.B.upperBoundShape});
                break;
            }
            default:
                NGRAPH_UNREACHABLE("UNKNOWN DYNAMISM MODE for MatMul DSR graph comparison test");
        }

        const auto matMul = std::make_shared<ngraph::opset3::MatMul>(inputA, inputB, matmul_setup.A.transpose, matmul_setup.B.transpose);
        return matMul;
    }
};
// Builds the dynamically-shaped MatMul network for the current parameter set
// and executes it via the inherited Run() (presumably comparing device output
// against the reference implementation, per the test name — confirm in
// DSR_TestsCommon / LayerTestsCommon).
TEST_P(DSR_MatMul, CompareWithReference) {
Run();
}
// Suite was previously registered as DISABLED_DynamicMatMul (JIRA: 33997);
// this commit enables it. The scraped diff kept both the removed and the
// added instantiation line — only the enabled, post-commit registration is
// valid (registering the same suite twice would be a gtest error).
INSTANTIATE_TEST_CASE_P(DynamicMatMul, DSR_MatMul, combinations);
} // namespace

View File

@ -30,6 +30,13 @@ protected:
std::unordered_map<std::string, DataShape> m_shapes;
ngraph::ParameterVector m_parameterVector;
// Creates a static-shape Parameter, appends it to m_parameterVector (so the
// fixture treats it as a network input) and returns it.
// Fix: construct ngraph::opset3::Parameter to match the declared return type
// and the opset3 spelling used throughout this file (both alias op::v0::
// Parameter, but mixing the spellings is confusing to readers).
std::shared_ptr<ngraph::opset3::Parameter> createParameter(
        const ngraph::element::Type& element_type,
        const ngraph::PartialShape& shape) {
    m_parameterVector.push_back(std::make_shared<ngraph::opset3::Parameter>(element_type, shape));
    return m_parameterVector.back();
}
virtual std::shared_ptr<ngraph::Node> createInputSubgraphWithDSR(
const DataType& inDataType, const DataShapeWithUpperBound& shapes) {
const auto inDataParam = std::make_shared<ngraph::opset3::Parameter>(