[CPU] Enable skipped custom shape infer test cases (#19037)

* enable eltwise skip test

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* enable skipped test of onehot

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* remove shapeof 0D test. CPU node shape_of doesn't support it

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* enable skipped test case of strideslice

remove default stride test

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* enable skipped test case of matmul

remove some test cases; custom matmul shape infer only supports certain input ranks

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* fix eltwise autob issue

PowerStatic doesn't get the autob attribute from the origin op

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* Revert "fix eltwise autob issue"

This reverts commit 1139296411.

* Revert "enable eltwise skip test"

This reverts commit c9f0a6f225.

* disable eltwise none autob test

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* add error message for OPENVINO_ASSERT

use the NgraphShapeInfer branch in the matmul node
when the input0 rank is not equal to the input1 rank

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* create ticket for EltwiseShapeInfer and skip the test case

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

* revert test case order in matmul

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>

---------

Signed-off-by: HU Yuan2 <yuan2.hu@intel.com>
This commit is contained in:
Yuan Hu 2023-10-10 20:03:59 +08:00 committed by GitHub
parent 0dcde7f7bc
commit b358d283d0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 36 additions and 54 deletions

View File

@ -29,7 +29,7 @@ Result MMShapeInfer::infer(
if (rankA == 1 && rankB == 1 && shapeA[0] == shapeB[0]) {
return {{m_shapeY}, ShapeInferStatus::success};
}
OPENVINO_ASSERT(m_out_rank >= 2, "The output rank should be greater or equal to 2.");
m_shapeY[m_out_rank-2] = m_transpose_a ? shapeA[rankA-1] : shapeA[rankA-2];
m_shapeY[m_out_rank-1] = m_transpose_b ? shapeB[rankB-2] : shapeB[rankB-1];
@ -54,7 +54,14 @@ ShapeInferPtr MMShapeInferFactory::makeShapeInfer() const {
const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length();
const bool transpose_a = matmul->get_transpose_a();
const bool transpose_b = matmul->get_transpose_b();
return std::make_shared<MMShapeInfer>(output_rank, transpose_a, transpose_b);
const auto input_rank0 = matmul->get_input_partial_shape(0).rank().get_length();
const auto input_rank1 = matmul->get_input_partial_shape(1).rank().get_length();
if (input_rank0 == input_rank1) {
return std::make_shared<MMShapeInfer>(output_rank, transpose_a, transpose_b);
} else {
return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), EMPTY_PORT_MASK);
}
} else {
OPENVINO_THROW("Unexpected operation type in the MatMul shape inference factory");
}

View File

@ -4,6 +4,7 @@
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#include <shape_inference/shape_inference_ngraph.hpp>
#pragma once
namespace ov {

View File

@ -21,7 +21,9 @@ Result OneHotShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
auto depth = reinterpret_cast<int32_t *>(data_dependency.at(1)->getData())[0];
if (depth < 0) {
OPENVINO_THROW("OneHot depth value can't be negative.");
}
auto result = input_shapes.front().get();
result.insert(result.begin() + m_axis, depth);

View File

@ -50,8 +50,15 @@ Result StridedSliceShapeInfer::infer(
if ((i >= shapeBegin[0]) || (shapeIn[i] == 0)) {
m_outputShape[new_idx] = shapeIn[i];
} else {
auto begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i];
auto end = m_end_mask_set.count(i) ? shapeIn[i] : endPtr[i];
int32_t begin = 0;
int32_t end = 0;
if (stridePtr[i] < 0) {
begin = m_begin_mask_set.count(i) ? shapeIn[i] : beginPtr[i];
end = m_end_mask_set.count(i) ? (-1 - shapeIn[i]) : endPtr[i];
} else {
begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i];
end = m_end_mask_set.count(i) ? shapeIn[i] : endPtr[i];
}
m_outputShape[new_idx] = ov::op::slice::get_sliced_value(shapeIn[i], begin, end, stridePtr[i]);
}
new_idx += 1;

View File

@ -83,7 +83,7 @@ TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_aubtob_none) {
}
TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_aubtob_none_incompatible_shapes) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
GTEST_SKIP() << "CVS-122351 Skipping test, eltwiseShapeInfer only implements numpy-type broadcast";
auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
@ -92,7 +92,6 @@ TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_aubtob_none_incompatible
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}},
static_output_shapes = {StaticShape{}};
//TODO cvs-108946, below test can't pass.
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes),
ov::Exception,
testing::HasSubstr("Eltwise shape infer input shapes dim index:"));

View File

@ -4,6 +4,8 @@
#include <gtest/gtest.h>
#include "custom_shape_infer.hpp"
#include "openvino/core/dimension.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
@ -33,7 +35,7 @@ public:
protected:
void SetUp() override {
std::tie(a_shape, b_shape) = GetParam();
(*exp_shape).clear();
set_exp_shape();
output_shapes.clear();
output_shapes.push_back(exp_shape);
@ -82,39 +84,35 @@ protected:
};
TEST_P(CPUMatMulTest, no_input_transpose) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, false);
std::vector<StaticShape> static_input_shapes = {a_shape, b_shape};
// TODO 108946,below test case can't pass
matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector<ov::Dimension>(exp_shape.size(), -1)));
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}
TEST_P(CPUMatMulTest, transpose_input_a) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, false);
const auto a_transpose = make_transpose_input(a_shape);
std::vector<StaticShape> static_input_shapes = {a_transpose, b_shape};
// TODO 108946,below test case can't pass
matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector<ov::Dimension>(exp_shape.size(), -1)));
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}
TEST_P(CPUMatMulTest, transpose_input_b) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, true);
const auto b_transpose = make_transpose_input(b_shape);
std::vector<StaticShape> static_input_shapes = {a_shape, b_transpose};
// TODO 108946,below test case can't pass
matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector<ov::Dimension>(exp_shape.size(), -1)));
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}
TEST_P(CPUMatMulTest, transpose_inputs_a_b) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, true);
const auto a_transpose = make_transpose_input(a_shape);
@ -122,7 +120,7 @@ TEST_P(CPUMatMulTest, transpose_inputs_a_b) {
std::vector<StaticShape> static_input_shapes = {a_transpose, b_transpose};
// TODO 108946,below test case can't pass
matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector<ov::Dimension>(exp_shape.size(), -1)));
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}

View File

@ -89,7 +89,6 @@ INSTANTIATE_TEST_SUITE_P(
using OneHotCpuShapeInferenceThrowExceptionTest = OneHotCpuShapeInferenceTest;
TEST_P(OneHotCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto depth = std::make_shared<op::v0::Parameter>(element::i64, ov::Shape{});
const auto on = std::make_shared<op::v0::Parameter>(element::i32, ov::Shape{});
const auto off = std::make_shared<op::v0::Parameter>(element::i32, ov::Shape{});
@ -101,9 +100,9 @@ TEST_P(OneHotCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
const auto off_tensor = ov::Tensor(element::i32, ov::Shape{}, &m_off);
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, depth_tensor}, {2, on_tensor}, {3, off_tensor}};
// TODO , implementation should throw exception
ASSERT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data),
ov::Exception);
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data),
ov::Exception,
testing::HasSubstr("OneHot depth value can't be negative"));
}
INSTANTIATE_TEST_SUITE_P(

View File

@ -39,19 +39,6 @@ TEST(CpuShapeInfer, v3ShapeOf5DTest) {
unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes);
}
TEST(CpuShapeInfer, ShapeOf0DTest) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{});
auto shapeof =
std::make_shared<op::v3::ShapeOf>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{}},
static_output_shapes = {StaticShape{}};
// TODO , can't pass implementation don't support 0D shape input
unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes);
}
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu

View File

@ -101,29 +101,11 @@ INSTANTIATE_TEST_SUITE_P(
make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{1, 0, 0}, {0, 0, 0}, {1, 1, 1}},
std::vector<int64_t>{0, 1, 1}, std::vector<int64_t>(3, 1), StaticShape({2, 2, 3})),
make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{0, 1, 0}, {2, 0, 0}, {1, 1, 2}},
std::vector<int64_t>{1, 0, 1}, std::vector<int64_t>{0, 1, 1}, StaticShape({2, 1, 2}))),
// TODO 108946, can't pass;
// make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{0, 0, 0}, {1, 0, 0}, {1, 1, -1}},
// std::vector<int64_t>{0, 1, 1}, std::vector<int64_t>{0, 1, 1}, StaticShape({1, 1, 3}))),
std::vector<int64_t>{1, 0, 1}, std::vector<int64_t>{0, 1, 1}, StaticShape({2, 1, 2})),
make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{0, 0, 0}, {1, 0, 0}, {1, 1, -1}},
std::vector<int64_t>{0, 1, 1}, std::vector<int64_t>{0, 1, 1}, StaticShape({1, 2, 3}))),
StridedSliceCpuShapeInferenceTest::getTestCaseName);
TEST(CpuShapeInfer, StridedSliceDefault_stride) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto mask = std::vector<int64_t>{0, 1, 0};
const auto data = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
// only supprot i32
const auto begin = op::v0::Constant::create(element::i32, ov::Shape{3}, {0, 0, 0});
const auto end = op::v0::Constant::create(element::i32, ov::Shape{3}, {1, 0, 2});
const auto op = std::make_shared<op::v1::StridedSlice>(data, begin, end, mask, mask);
std::vector<StaticShape> static_input_shapes = {{3, 2, 3}, {3}, {3}};
std::vector<StaticShape> static_output_shapes = {StaticShape{1, 2, 2}};
// implementation depends on some output information of the op
op->set_output_type(0, element::i32, {-1, -1, -1});
// TODO 108946,there is some issue in implementation, this test case can't pass
unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
}
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu