[GPU] Support axis 0 for Softmax (#10364)

* [GPU] Modify Softmax single layer tests to check that Softmax-8 is supported with axes in the [-rank, rank) interval
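
A minimal C++ sketch (illustration only, not part of this change; the helper name is hypothetical) of the Softmax-8 axis convention these tests exercise: any axis in [-rank, rank) is accepted, and a negative axis is read as axis + rank.

    #include <cassert>
    #include <cstdint>

    // Hypothetical helper illustrating the opset-8 axis convention.
    int64_t normalize_axis(int64_t axis, int64_t rank) {
        assert(axis >= -rank && axis < rank);  // valid interval is [-rank, rank)
        return axis < 0 ? axis + rank : axis;  // e.g. axis = -1, rank = 4 -> 3
    }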

* [GPU] Fix cldnn::softmax::dimension_t documentation

* [GPU] Fix ParamsKey::EnableSoftmaxDim

Support Z dimension.

* [GPU] Add Softmax single layer test that checks 5D case

Since some Softmax kernel code contains an ifdef for the 5-dimensional case,
a test case is needed to cover this functionality.
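
A sketch of the guard in question (an assumption: the exact macro names are not shown in the diff, only the INPUT0_* convention visible in the kernel hunks below): with a 5D (bfzyx) input the z-like coordinate becomes a real index, otherwise it stays 0.

    #if INPUT0_DIMS == 5
        const uint other0 = get_global_id(0) % INPUT0_OTHER0_SIZE;
        const uint other2 = get_global_id(0) / INPUT0_OTHER0_SIZE;  // z-like coordinate
    #else
        const uint other0 = get_global_id(0);
        const uint other2 = 0;  // collapses to 4D indexing
    #endif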

* [GPU] Support axis 0 in Softmax

* [GPU] Modify Softmax single layer tests to check axis 0

* [GPU] Modify Softmax items class optimized kernel to handle axis 0 correctly

Modify single layer test accordingly.

* [GPU] Modify Softmax unit test to check softmax::normalize_b

* Split SoftMaxLayerTest into opset1 and opset8 versions

Use SoftMax8LayerTest in the tests throughout the repository.
SoftMaxLayerTest now defaults to SoftMax1LayerTest for compatibility.
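
A sketch of what the split means for test code (the concrete instantiations appear in the diffs below):

    // Opset-1 path: size_t axis, non-negative only; existing suites that name
    // SoftMaxLayerTest keep compiling against it via the alias.
    using SoftMaxLayerTest = SoftMax1LayerTest;

    // Opset-8 path: int64_t axis, negative values allowed.
    INSTANTIATE_TEST_SUITE_P(smoke_SoftMax2D, SoftMax8LayerTest, params2D,
                             SoftMax8LayerTest::getTestCaseName);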

* [GPU] Add f16 test case for Softmax single-layer test

Co-authored-by: tgubanova-lohika <tgubanova@lohika.com>
Author: opoluektov-lohika
Date: 2022-05-26 06:06:08 +03:00
Committed by: GitHub
Parent: 1cce278fcb
Commit: ccd001f25b
19 changed files with 346 additions and 121 deletions

View File

@@ -29,8 +29,8 @@ const std::vector<ov::test::InputShape> inputDynamicShape2D = {
     {{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()}, {{1, 10}, {2, 10}, {10, 10}}}
 };
-const std::vector<size_t> axis2D = {
-    0, 1
+const std::vector<int64_t> axis2D = {
+    -2, -1, 0, 1
 };
 const auto params2D_static = testing::Combine(
@@ -55,16 +55,16 @@ const auto params2D_dynamic = testing::Combine(
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax2D_static,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params2D_static,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax2D_dynamic,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params2D_dynamic,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
 const std::vector<ov::Shape> inputStaticShape4D = {
@@ -79,10 +79,14 @@ const std::vector<ov::test::InputShape> inputDynamicShape4D = {
     {{1, 100, 1, 1}, {50, 100, 4, 1}, {2, 100, 10, 1}}},
 };
-const std::vector<size_t> axis4D = {0, 1, 2, 3};
+const std::vector<ov::test::ElementType> netPrecisions4D = {
+    ov::element::f32,
+};
+const std::vector<int64_t> axis4D = {0, 1, 2, 3, -1, -2, -3, -4};
 const auto params4Dstatic = testing::Combine(
-    testing::ValuesIn(netPrecisions),
+    testing::ValuesIn(netPrecisions4D),
     ::testing::Values(ov::element::undefined),
     ::testing::Values(ov::element::undefined),
     testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape4D)),
@@ -92,7 +96,7 @@ const auto params4Dstatic = testing::Combine(
 );
 const auto params4Ddynamic = testing::Combine(
-    testing::ValuesIn(netPrecisions),
+    testing::ValuesIn(netPrecisions4D),
     ::testing::Values(ov::element::undefined),
     ::testing::Values(ov::element::undefined),
     testing::ValuesIn(inputDynamicShape4D),
@@ -103,16 +107,69 @@ const auto params4Ddynamic = testing::Combine(
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax4D_static,
-    SoftMaxLayerTest,
-    params2D_static,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest,
+    params4Dstatic,
+    SoftMax8LayerTest::getTestCaseName
 );
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax4D_dynamic,
-    SoftMaxLayerTest,
-    params2D_dynamic,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest,
+    params4Ddynamic,
+    SoftMax8LayerTest::getTestCaseName
 );
+const std::vector<ov::Shape> inputStaticShape5D = {
+    {1, 100, 1, 1, 1},
+    {50, 100, 4, 1, 1},
+    {2, 100, 10, 1, 1},
+};
+const std::vector<ov::test::InputShape> inputDynamicShape5D = {
+    {{ngraph::Dimension::dynamic(), 100, ngraph::Dimension(1, 10), 1, 1}, {{1, 100, 1, 1, 1}, {100, 100, 5, 1, 1}}},
+    {{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()},
+     {{1, 100, 1, 1, 1}, {50, 100, 4, 1, 1}, {2, 100, 10, 1, 1}}},
+};
+const std::vector<ov::test::ElementType> netPrecisions5D = {
+    ov::element::f32,
+};
+const std::vector<int64_t> axis5D = {0, 1, 2, 3, 4, -1, -2, -3, -4, -5};
+const auto params5Dstatic = testing::Combine(
+    testing::ValuesIn(netPrecisions5D),
+    ::testing::Values(ov::element::undefined),
+    ::testing::Values(ov::element::undefined),
+    testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape5D)),
+    testing::ValuesIn(axis5D),
+    testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+    testing::Values(ov::AnyMap())
+);
+const auto params5Ddynamic = testing::Combine(
+    testing::ValuesIn(netPrecisions5D),
+    ::testing::Values(ov::element::undefined),
+    ::testing::Values(ov::element::undefined),
+    testing::ValuesIn(inputDynamicShape5D),
+    testing::ValuesIn(axis5D),
+    testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+    testing::Values(ov::AnyMap())
+);
+INSTANTIATE_TEST_SUITE_P(
+    smoke_SoftMax5D_static,
+    SoftMax8LayerTest,
+    params5Dstatic,
+    SoftMax8LayerTest::getTestCaseName
+);
+INSTANTIATE_TEST_SUITE_P(
+    smoke_SoftMax5D_dynamic,
+    SoftMax8LayerTest,
+    params5Ddynamic,
+    SoftMax8LayerTest::getTestCaseName
+);
 } // namespace

View File

@@ -27,6 +27,7 @@ struct softmax : public primitive_base<softmax> {
     /// @brief Enum type to specify softmax's normalization scope (see #dimension).
     enum dimension_t {
+        normalize_b,
         normalize_f,
         normalize_x,
         normalize_y,
@@ -52,9 +53,10 @@ struct softmax : public primitive_base<softmax> {
     /// Specific behaviour is determined by this parameter, as follows:
     /// - when set to @link softmax::dimension_t softmax::normalize_x @endlink each input row is normalized independently,
     /// - when set to @link softmax::dimension_t softmax::normalize_y @endlink each input column is normalized independently,
+    /// - when set to @link softmax::dimension_t softmax::normalize_z @endlink each input z-coordinate is normalized independently,
     /// - when set to @link softmax::dimension_t softmax::normalize_f @endlink each in-depth vector of input is normalized independently,
     /// - when set to @link softmax::dimension_t softmax::normalize_fyx @endlink each 3d image within input is normalized independently,
-    /// - when set to @link softmax::dimension_t softmax::normalize_bfyx @endlink everything is normalized,
+    /// - when set to @link softmax::dimension_t softmax::normalize_all @endlink everything is normalized,
     dimension_t dimension;
 };
 /// @}
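
To make the new scope concrete, a standalone sketch (written for illustration, not plugin code) of what softmax::normalize_b computes for a dense bfyx buffer: softmax is taken across b for every fixed (f, y, x) location.

    #include <cmath>
    #include <vector>

    // Reference semantics of normalize_b on a bfyx-ordered buffer
    // of size b*f*y*x.
    std::vector<float> softmax_over_batch(const std::vector<float>& in,
                                          int b, int f, int y, int x) {
        std::vector<float> out(in.size());
        for (int fi = 0; fi < f; ++fi)
            for (int yi = 0; yi < y; ++yi)
                for (int xi = 0; xi < x; ++xi) {
                    float denom = 0.f;
                    for (int bi = 0; bi < b; ++bi)
                        denom += std::exp(in[((bi * f + fi) * y + yi) * x + xi]);
                    for (int bi = 0; bi < b; ++bi) {
                        const size_t idx = ((bi * f + fi) * y + yi) * x + xi;
                        out[idx] = std::exp(in[idx]) / denom;
                    }
                }
        return out;
    }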

View File

@@ -47,6 +47,10 @@ struct softmax_impl : typed_primitive_impl_ocl<softmax> {
             sm_params.dim = kernel_selector::softmax_dim::FEATURE;
             break;
+        case softmax::normalize_b:
+            sm_params.dim = kernel_selector::softmax_dim::BATCH;
+            break;
         case softmax::normalize_f:
             sm_params.dim = kernel_selector::softmax_dim::FEATURE;
             break;

View File

@@ -325,6 +325,7 @@ enum class SoftmaxDim {
     Y,
     Z,
     FEATURE,
+    BATCH,
 };
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@@ -36,6 +36,7 @@ ParamsKey SoftmaxItemsClassKernelBase::GetDefaultSupportedKey() {
     k.EnableSoftmaxDim(SoftmaxDim::Y);
     k.EnableSoftmaxDim(SoftmaxDim::Z);
     k.EnableSoftmaxDim(SoftmaxDim::FEATURE);
+    k.EnableSoftmaxDim(SoftmaxDim::BATCH);
     k.EnableDifferentTypes();
     k.EnableTensorOffset();
     k.EnableTensorPitches();
@@ -53,6 +54,8 @@ std::vector<size_t> SoftmaxItemsClassKernelBase::GetSoftmaxDimGlobalSizes(Softma
         return {out.X().v * out.Y().v, out.Feature().v, out.Batch().v};
     case SoftmaxDim::FEATURE:
         return {out.X().v * out.Z().v, out.Y().v, out.Batch().v};
+    case SoftmaxDim::BATCH:
+        return {out.X().v * out.Z().v, out.Y().v, out.Feature().v};
     default:
         return {};
     }
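
Reading the new BATCH row against its neighbours: the normalized ("class") dimension never appears in the global work size, so for BATCH the dispatch enumerates the remaining dimensions and each work item reduces over b.

    // Assumed reading of the BATCH row above (pattern inferred from the
    // existing FEATURE row): gws = { X*Z, Y, F }, class dimension = B, so
    // get_group_id(2) / get_global_id(2) indexes features and the kernel
    // walks the batch via the CLASS_* constants defined below.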
@@ -70,14 +73,16 @@ JitConstants SoftmaxItemsClassKernelBase::GetJitConstants(const softmax_params&
             MakeJitConstant("INPUT0_OTHER0_SIZE", "INPUT0_SIZE_Y"),
             MakeJitConstant("INPUT0_OTHER1_PITCH", "INPUT0_FEATURE_PITCH"),
             MakeJitConstant("INPUT0_OTHER2_PITCH", "INPUT0_Z_PITCH"),
+            MakeJitConstant("INPUT0_OTHER3_PITCH", "INPUT0_BATCH_PITCH"),
             MakeJitConstant("INPUT0_CLASS_PITCH", "INPUT0_X_PITCH"),
             MakeJitConstant("INPUT0_CLASS_NUM", "INPUT0_SIZE_X"),
             MakeJitConstant("OUTPUT_OTHER0_PITCH", "OUTPUT_Y_PITCH"),
-            MakeJitConstant("OUTPUT_OTHER2_PITCH", "OUTPUT_Z_PITCH"),
             MakeJitConstant("OUTPUT_OTHER1_PITCH", "OUTPUT_FEATURE_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER2_PITCH", "OUTPUT_Z_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER3_PITCH", "OUTPUT_BATCH_PITCH"),
             MakeJitConstant("OUTPUT_CLASS_PITCH", "OUTPUT_X_PITCH"),
         });
-        idx_order = {"batch", "other1", ndims == 5 ? "other2" : "0", "other0", "cls"};
+        idx_order = {"other3", "other1", ndims == 5 ? "other2" : "0", "other0", "cls"};
         break;
     case SoftmaxDim::Y:
         jit.AddConstants({
@@ -85,14 +90,16 @@ JitConstants SoftmaxItemsClassKernelBase::GetJitConstants(const softmax_params&
             MakeJitConstant("INPUT0_OTHER0_SIZE", "INPUT0_SIZE_X"),
             MakeJitConstant("INPUT0_OTHER1_PITCH", "INPUT0_FEATURE_PITCH"),
             MakeJitConstant("INPUT0_OTHER2_PITCH", "INPUT0_Z_PITCH"),
+            MakeJitConstant("INPUT0_OTHER3_PITCH", "INPUT0_BATCH_PITCH"),
             MakeJitConstant("INPUT0_CLASS_PITCH", "INPUT0_Y_PITCH"),
             MakeJitConstant("INPUT0_CLASS_NUM", "INPUT0_SIZE_Y"),
             MakeJitConstant("OUTPUT_OTHER0_PITCH", "OUTPUT_X_PITCH"),
             MakeJitConstant("OUTPUT_OTHER1_PITCH", "OUTPUT_FEATURE_PITCH"),
             MakeJitConstant("OUTPUT_OTHER2_PITCH", "OUTPUT_Z_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER3_PITCH", "OUTPUT_BATCH_PITCH"),
             MakeJitConstant("OUTPUT_CLASS_PITCH", "OUTPUT_Y_PITCH"),
         });
-        idx_order = {"batch", "other1", ndims == 5 ? "other2" : "0", "cls", "other0"};
+        idx_order = {"other3", "other1", ndims == 5 ? "other2" : "0", "cls", "other0"};
         break;
     case SoftmaxDim::Z:
         jit.AddConstants({
@@ -100,14 +107,16 @@ JitConstants SoftmaxItemsClassKernelBase::GetJitConstants(const softmax_params&
             MakeJitConstant("INPUT0_OTHER0_SIZE", "INPUT0_SIZE_X"),
             MakeJitConstant("INPUT0_OTHER1_PITCH", "INPUT0_FEATURE_PITCH"),
             MakeJitConstant("INPUT0_OTHER2_PITCH", "INPUT0_Y_PITCH"),
+            MakeJitConstant("INPUT0_OTHER3_PITCH", "INPUT0_BATCH_PITCH"),
             MakeJitConstant("INPUT0_CLASS_PITCH", "INPUT0_Z_PITCH"),
             MakeJitConstant("INPUT0_CLASS_NUM", "INPUT0_SIZE_Z"),
             MakeJitConstant("OUTPUT_OTHER0_PITCH", "OUTPUT_X_PITCH"),
             MakeJitConstant("OUTPUT_OTHER1_PITCH", "OUTPUT_FEATURE_PITCH"),
             MakeJitConstant("OUTPUT_OTHER2_PITCH", "OUTPUT_Y_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER3_PITCH", "OUTPUT_BATCH_PITCH"),
             MakeJitConstant("OUTPUT_CLASS_PITCH", "OUTPUT_Z_PITCH"),
         });
-        idx_order = {"batch", "other1", "cls", "other2", "other0"};
+        idx_order = {"other3", "other1", "cls", "other2", "other0"};
         break;
     case SoftmaxDim::FEATURE:
         jit.AddConstants({
@@ -115,14 +124,33 @@ JitConstants SoftmaxItemsClassKernelBase::GetJitConstants(const softmax_params&
             MakeJitConstant("INPUT0_OTHER0_SIZE", "INPUT0_SIZE_X"),
             MakeJitConstant("INPUT0_OTHER1_PITCH", "INPUT0_Y_PITCH"),
             MakeJitConstant("INPUT0_OTHER2_PITCH", "INPUT0_Z_PITCH"),
+            MakeJitConstant("INPUT0_OTHER3_PITCH", "INPUT0_BATCH_PITCH"),
             MakeJitConstant("INPUT0_CLASS_PITCH", "INPUT0_FEATURE_PITCH"),
             MakeJitConstant("INPUT0_CLASS_NUM", "INPUT0_FEATURE_NUM"),
             MakeJitConstant("OUTPUT_OTHER0_PITCH", "OUTPUT_X_PITCH"),
             MakeJitConstant("OUTPUT_OTHER1_PITCH", "OUTPUT_Y_PITCH"),
             MakeJitConstant("OUTPUT_OTHER2_PITCH", "OUTPUT_Z_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER3_PITCH", "OUTPUT_BATCH_PITCH"),
             MakeJitConstant("OUTPUT_CLASS_PITCH", "OUTPUT_FEATURE_PITCH"),
         });
-        idx_order = {"batch", "cls", ndims == 5 ? "other2" : "0", "other1", "other0"};
+        idx_order = {"other3", "cls", ndims == 5 ? "other2" : "0", "other1", "other0"};
         break;
+    case SoftmaxDim::BATCH:
+        jit.AddConstants({
+            MakeJitConstant("INPUT0_OTHER0_PITCH", "INPUT0_X_PITCH"),
+            MakeJitConstant("INPUT0_OTHER0_SIZE", "INPUT0_SIZE_X"),
+            MakeJitConstant("INPUT0_OTHER1_PITCH", "INPUT0_Y_PITCH"),
+            MakeJitConstant("INPUT0_OTHER2_PITCH", "INPUT0_Z_PITCH"),
+            MakeJitConstant("INPUT0_OTHER3_PITCH", "INPUT0_FEATURE_PITCH"),
+            MakeJitConstant("INPUT0_CLASS_PITCH", "INPUT0_BATCH_PITCH"),
+            MakeJitConstant("INPUT0_CLASS_NUM", "INPUT0_BATCH_NUM"),
+            MakeJitConstant("OUTPUT_OTHER0_PITCH", "OUTPUT_X_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER1_PITCH", "OUTPUT_Y_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER2_PITCH", "OUTPUT_Z_PITCH"),
+            MakeJitConstant("OUTPUT_OTHER3_PITCH", "OUTPUT_FEATURE_PITCH"),
+            MakeJitConstant("OUTPUT_CLASS_PITCH", "OUTPUT_BATCH_PITCH"),
+        });
+        idx_order = {"cls", "other3", ndims == 5 ? "other2" : "0", "other1", "other0"};
+        break;
     default:
         break;
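
Taken together, these JIT constants give the kernels a uniform view: CLASS_* describes the dimension being normalized and OTHER0..OTHER3 the remaining ones, so supporting axis 0 only required a fourth "other" slot plus the BATCH case. A simplified sketch of how a kernel consumes them (the real kernels below also track a running maximum for numerical stability):

    for (uint cls = 0; cls < INPUT0_CLASS_NUM; ++cls)
        data[cls] = input[in_depth_offset + cls * INPUT0_CLASS_PITCH];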

View File

@@ -25,6 +25,9 @@ inline static size_t GetItemClassCount(const DataTensor& input, SoftmaxDim dim)
     case SoftmaxDim::FEATURE:
         item_class_count = input.Feature().v;
         break;
+    case SoftmaxDim::BATCH:
+        item_class_count = input.Batch().v;
+        break;
     default:
         break;
     }

View File

@@ -24,11 +24,11 @@ KERNEL(softmax_items_class_optimized)(
     const uint other2 = 0;
 #endif
     const uint other1 = get_group_id(1);
-    const uint batch = get_group_id(2);
+    const uint other3 = get_group_id(2);
     const uint simd_lane = get_sub_group_local_id();
-    const uint in_depth_offset = batch*INPUT0_BATCH_PITCH + other2*INPUT0_OTHER2_PITCH + other1*INPUT0_OTHER1_PITCH + other0*INPUT0_OTHER0_PITCH + INPUT0_OFFSET;
-    const uint out_depth_offset = batch*OUTPUT_BATCH_PITCH + other2*OUTPUT_OTHER2_PITCH + other1*OUTPUT_OTHER1_PITCH + other0*OUTPUT_OTHER0_PITCH + OUTPUT_OFFSET;
+    const uint in_depth_offset = other3*INPUT0_OTHER3_PITCH + other2*INPUT0_OTHER2_PITCH + other1*INPUT0_OTHER1_PITCH + other0*INPUT0_OTHER0_PITCH + INPUT0_OFFSET;
+    const uint out_depth_offset = other3*OUTPUT_OTHER3_PITCH + other2*OUTPUT_OTHER2_PITCH + other1*OUTPUT_OTHER1_PITCH + other0*OUTPUT_OTHER0_PITCH + OUTPUT_OFFSET;
     ACCUMULATOR_TYPE max_value = UNIT_VAL_MIN;
     ACCUMULATOR_TYPE data[DATA_PER_WORKITEM];

View File

@@ -21,10 +21,10 @@ KERNEL(softmax)(
     const uint other2 = 0;
 #endif
     const uint other1 = get_global_id(1);
-    const uint batch = get_global_id(2);
+    const uint other3 = get_global_id(2);
-    const uint in_depth_offset = batch*INPUT0_BATCH_PITCH + other2*INPUT0_OTHER2_PITCH + other1*INPUT0_OTHER1_PITCH + other0*INPUT0_OTHER0_PITCH + INPUT0_OFFSET;
-    const uint out_depth_offset = batch*OUTPUT_BATCH_PITCH + other2*OUTPUT_OTHER2_PITCH + other1*OUTPUT_OTHER1_PITCH + other0*OUTPUT_OTHER0_PITCH + OUTPUT_OFFSET;
+    const uint in_depth_offset = other3*INPUT0_OTHER3_PITCH + other2*INPUT0_OTHER2_PITCH + other1*INPUT0_OTHER1_PITCH + other0*INPUT0_OTHER0_PITCH + INPUT0_OFFSET;
+    const uint out_depth_offset = other3*OUTPUT_OTHER3_PITCH + other2*OUTPUT_OTHER2_PITCH + other1*OUTPUT_OTHER1_PITCH + other0*OUTPUT_OTHER0_PITCH + OUTPUT_OFFSET;
     ACCUMULATOR_TYPE max_value = UNIT_VAL_MIN;
     ACCUMULATOR_TYPE data[INPUT0_CLASS_NUM];

View File

@@ -271,7 +271,9 @@ std::string toString(SoftmaxDim d) {
     switch (d) {
         case SoftmaxDim::X: return "X";
         case SoftmaxDim::Y: return "Y";
+        case SoftmaxDim::Z: return "Z";
         case SoftmaxDim::FEATURE: return "FEATURE";
+        case SoftmaxDim::BATCH: return "BATCH";
         default: return "";
     }
 }

View File

@@ -270,9 +270,15 @@ void ParamsKey::EnableSoftmaxDim(SoftmaxDim d) {
     case SoftmaxDim::Y:
        key.restrict.val.dedicated.softmax.dimY = 1;
        break;
+    case SoftmaxDim::Z:
+        key.restrict.val.dedicated.softmax.dimZ = 1;
+        break;
     case SoftmaxDim::FEATURE:
         key.restrict.val.dedicated.softmax.dimFeature = 1;
         break;
+    case SoftmaxDim::BATCH:
+        key.restrict.val.dedicated.softmax.dimBatch = 1;
+        break;
     default:
         break;
     }

View File

@@ -141,7 +141,9 @@ public:
     struct softmax_t {
         uint32_t dimX : 1;
         uint32_t dimY : 1;
+        uint32_t dimZ : 1;
         uint32_t dimFeature : 1;
+        uint32_t dimBatch : 1;
     } softmax;
     struct region_yolo_t {
         uint32_t dimX : 1;

View File

@@ -17,8 +17,7 @@ namespace intel_gpu {
static cldnn::softmax::dimension_t GetSoftmaxAxis(int64_t axis, size_t rank) {
switch (axis) {
// FIXME: it seems that axis=0 should correspond to normalize_b;
case 0: return cldnn::softmax::normalize_all;
case 0: return cldnn::softmax::normalize_b;
case 1: return cldnn::softmax::normalize_f;
case 2:
if (rank > 4)
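
Only the axis-0 case is visible in this hunk. For orientation, the mapping presumably continues along these lines; the tail cases are reconstructed for illustration and are not shown in the diff:

    // Assumed continuation of GetSoftmaxAxis (illustrative only).
    case 2:
        if (rank > 4)
            return cldnn::softmax::normalize_z;  // bfzyx: axis 2 is z
        return cldnn::softmax::normalize_y;      // bfyx:  axis 2 is y
    case 3:
        if (rank > 4)
            return cldnn::softmax::normalize_y;
        return cldnn::softmax::normalize_x;
    case 4:
        return cldnn::softmax::normalize_x;      // bfzyx: innermost axis
    // error handling for out-of-range axes omitted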

View File

@@ -820,6 +820,96 @@ TEST(softmax_gpu_bfzyx_f16, normalize_all) {
     ASSERT_NEAR(sum, expected_sum, 0.001);
 }
+TEST(softmax_gpu_bfyx_f32, normalize_b) {
+    //  Input  : 3x2x2x2
+    static const int32_t x_size = 2, y_size = 2, feature_num = 2,
+        batch_num = 3, buf_size = x_size*y_size * batch_num * feature_num;
+    auto& engine = get_test_engine();
+    auto input = engine.allocate_memory({ data_types::f32, format::bfyx,{ batch_num, feature_num, x_size , y_size } });
+    topology topology;
+    topology.add(input_layout("input", input->get_layout()));
+    topology.add(softmax("softmax", "input", softmax::normalize_b));
+    vector<float> input_vec = {
+        //      y0x0  y0x1   y1x0   y1x1
+        /*b0f0*/0.1f, -0.1f, 0.9f,  1.5f,
+        /*b0f1*/3.f,  0.5f,  7.f,   12.f,
+        /*b1f0*/0.2f, 0.2f,  -10.f, 5.2f,
+        /*b1f1*/4.f,  0.5f,  8.f,   8.2f,
+        /*b2f0*/0.2f, 0.2f,  -10.f, 5.2f,
+        /*b2f1*/0.2f, 0.2f,  -10.f, 5.2f
+    };
+    set_values(input, input_vec);
+    float expected_max_values[8] = {
+        0.344253346f, //f=0, y=0, x=0
+        0.364854551f, //f=0, y=0, x=1
+        0.999963085f, //f=0, y=1, x=0
+        0.493894592f, //f=0, y=1, x=1
+        0.719294981f, //f=1, y=0, x=0
+        0.364854551f, //f=1, y=0, x=1
+        0.73105857f,  //f=1, y=1, x=0
+        0.977054322f  //f=1, y=1, x=1
+    };
+    network network(engine, topology);
+    network.set_input_data("input", input);
+    auto outputs = network.execute();
+    EXPECT_EQ(outputs.size(), size_t(1));
+    EXPECT_EQ(outputs.begin()->first, "softmax");
+    auto output = outputs.at("softmax").get_memory();
+    cldnn::mem_lock<float> output_ptr(output, get_test_stream());
+    float out_buffer[buf_size];
+    for (uint32_t i = 0; i < buf_size; i++)
+    {
+        out_buffer[i] = output_ptr[i];
+    }
+    float temp_max = 0;
+    float expected_sum = 1.0f;
+    int max_value_buffer_index = 0;
+    // These loops sum the results across the batch per (feature, y, x); we expect that sum == 1.0f.
+    for (uint32_t i = 0; i < feature_num; i++)
+    {
+        for (uint32_t j = 0; j < y_size; j++)
+        {
+            for (uint32_t k = 0; k < x_size; k++)
+            {
+                float sum = 0.0f;
+                for (uint32_t l = 0; l < batch_num; l++)
+                {
+                    int index = l * feature_num * x_size * y_size +
+                                i * x_size * y_size +
+                                j * x_size +
+                                k;
+                    if (out_buffer[index] >= temp_max)
+                    {
+                        temp_max = out_buffer[index];
+                    }
+                    sum += out_buffer[index];
+                }
+                EXPECT_EQ(true, are_equal(temp_max, expected_max_values[max_value_buffer_index]));
+                temp_max = 0;
+                max_value_buffer_index++;
+                EXPECT_EQ(true, are_equal(sum, expected_sum));
+                sum = 0.0f;
+            }
+        }
+    }
+}
 //////////////////////////////////////////////////////////////////////////////
 //                                                                          //
 //                      Exhaustive Negative Matrix tests                    //
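
The first expected value can be checked by hand: at (f=0, y=0, x=0) the batch column of the input is {0.1f, 0.2f, 0.2f}, and softmax over b gives exp(0.2) / (exp(0.1) + 2*exp(0.2)) ~= 0.344253, matching expected_max_values[0]. A standalone check (not part of the test):

    #include <cmath>
    #include <cstdio>

    int main() {
        const double denom = std::exp(0.1) + 2.0 * std::exp(0.2);
        std::printf("%.9f\n", std::exp(0.2) / denom);  // prints ~0.344253
    }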

View File

@@ -28,8 +28,8 @@ const std::vector<ov::test::InputShape> inputDynamicShape2D = {
     {{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()}, {{1, 10}, {2, 10}, {10, 10}}}
 };
-const std::vector<size_t> axis2D = {
-    0, 1
+const std::vector<int64_t> axis2D = {
+    0, 1, -1
 };
 const auto params2D_static = testing::Combine(
@@ -54,16 +54,16 @@ const auto params2D_dynamic = testing::Combine(
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax2D_static,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params2D_static,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax2D_dynamic,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params2D_dynamic,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
 const std::vector<ov::Shape> inputStaticShape4D = {
@@ -78,7 +78,7 @@ const std::vector<ov::test::InputShape> inputDynamicShape4D = {
     {{1, 100, 1, 1}, {50, 100, 4, 1}, {2, 100, 10, 1}}},
 };
-const std::vector<size_t> axis4D = {0, 1, 2, 3};
+const std::vector<int64_t> axis4D = {0, 1, 2, 3, -1, -2, -3};
 const auto params4Dstatic = testing::Combine(
     testing::ValuesIn(netPrecisions),
@@ -102,16 +102,16 @@ const auto params4Ddynamic = testing::Combine(
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax4D_static,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params2D_static,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax4D_dynamic,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params2D_dynamic,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
 } // namespace

View File

@@ -12,6 +12,7 @@ using namespace ov::test::subgraph;
 namespace {
 const std::vector<ov::test::ElementType> netPrecisions = {
+    ov::element::f16,
     ov::element::f32,
 };
@@ -19,10 +20,11 @@ const std::vector<ov::Shape> inputShapes2D = {
     {1, 100},
     {100, 1},
     {10, 10},
+    {100, 10}
 };
-const std::vector<size_t> axis2D = {
-    0, 1
+const std::vector<int64_t> axis2D = {
+    -2, -1, 0, 1
 };
 const auto params2D = testing::Combine(
@@ -37,9 +39,9 @@ const auto params2D = testing::Combine(
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax2D,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params2D,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
 const std::vector<ov::Shape> inputShapes4D = {
@@ -48,7 +50,7 @@ const std::vector<ov::Shape> inputShapes4D = {
     {2, 3, 4, 5},
 };
-const std::vector<size_t> axis4D = {0, 1, 2, 3};
+const std::vector<int64_t> axis4D = {-4, -3, -2, -1, 0, 1, 2, 3};
 const auto params4D = testing::Combine(
     testing::ValuesIn(netPrecisions),
@@ -62,9 +64,34 @@ const auto params4D = testing::Combine(
 INSTANTIATE_TEST_SUITE_P(
     smoke_SoftMax4D,
-    SoftMaxLayerTest,
+    SoftMax8LayerTest,
     params4D,
-    SoftMaxLayerTest::getTestCaseName
+    SoftMax8LayerTest::getTestCaseName
 );
+const std::vector<ov::Shape> inputShapes5D = {
+    {1, 100, 1, 1, 1},
+    {1, 3, 4, 3, 4},
+    {2, 3, 4, 5, 6},
+};
+const std::vector<int64_t> axis5D = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4};
+const auto params5D = testing::Combine(
+    testing::ValuesIn(netPrecisions),
+    ::testing::Values(ov::element::undefined),
+    ::testing::Values(ov::element::undefined),
+    testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes5D)),
+    testing::ValuesIn(axis5D),
+    testing::Values(CommonTestUtils::DEVICE_GPU),
+    testing::Values(ov::AnyMap())
+);
+INSTANTIATE_TEST_SUITE_P(
+    smoke_SoftMax5D,
+    SoftMax8LayerTest,
+    params5D,
+    SoftMax8LayerTest::getTestCaseName
+);
 } // namespace

View File

@@ -20,8 +20,6 @@ std::vector<std::string> disabledTestPatterns() {
     R"(.*(PreprocessTest).*(SetMeanImagePreProcessSetBlob).*)",
     R"(.*(PreprocessTest).*(ReverseInputChannelsPreProcessGetBlob).*)",
     R"(.*(InferRequestPreprocessDynamicallyInSetBlobTest).*)",
-    // TODO: Issue: 41462
-    R"(.*(SoftMaxLayerTest).*axis=0.*)",
     // TODO: Issue: 46841
     R"(.*(QuantGroupConvBackpropData3D).*)",
@@ -69,8 +67,6 @@ std::vector<std::string> disabledTestPatterns() {
     R"(.*smoke_LSTMSequenceCommonClip.*LSTMSequenceTest.*CompareWithRefs.*)",
     // Expected behavior. GPU plugin doesn't support i64 for eltwise power operation.
     R"(.*EltwiseLayerTest.*OpType=Pow.*NetType=i64.*)",
-    // TODO: Issue: 67486
-    R"(.*(SoftMaxLayerTest).*)",
     // TODO: Issue 68712
     R"(.*.MatMul.*CompareWithRefs.*IS0=\(1.5\)_IS1=\(1.5\).*transpose_a=0.*transpose_b=1.*CONSTANT.*FP16.*UNSPECIFIED.*UNSPECIFIED.*ANY.*)",
     // TODO: Issue 69187

View File

@@ -18,6 +18,14 @@ TEST_P(SoftMaxLayerTest, CompareQueryModel) {
     query_model();
 }
+TEST_P(SoftMax8LayerTest, CompareWithRefs) {
+    run();
+}
+TEST_P(SoftMax8LayerTest, CompareQueryModel) {
+    query_model();
+}
 } // namespace subgraph
 } // namespace test
 } // namespace ov

View File

@@ -4,30 +4,87 @@
 #pragma once
 #include "ngraph_functions/builders.hpp"
 #include "common_test_utils/common_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 namespace ov {
 namespace test {
 namespace subgraph {
-using SoftMaxTestParams = std::tuple<
-    ElementType,   // netPrecision
-    ElementType,   // inPrecision
-    ElementType,   // outPrecision
-    InputShape,    // Dynamic shape + Target static shapes
-    size_t,        // axis
-    TargetDevice,  // targetDevice
-    Config         // config
->;
+namespace aux {
-class SoftMaxLayerTest : public testing::WithParamInterface<SoftMaxTestParams>,
-                         virtual public ov::test::SubgraphBaseTest {
+template <class AxisType>
+using SoftMaxTestParams = std::tuple<ElementType,   // netPrecision
+                                     ElementType,   // inPrecision
+                                     ElementType,   // outPrecision
+                                     InputShape,    // Dynamic shape + Target static shapes
+                                     AxisType,      // axis
+                                     TargetDevice,  // targetDevice
+                                     Config         // config
+                                     >;
+template <class AxisType, class SoftmaxOpType>
+class SoftMaxLayerTestBase : public testing::WithParamInterface<SoftMaxTestParams<AxisType>>,
+                             virtual public ov::test::SubgraphBaseTest {
 public:
-    static std::string getTestCaseName(const testing::TestParamInfo<SoftMaxTestParams> &obj);
+    static std::string getTestCaseName(const testing::TestParamInfo<SoftMaxTestParams<AxisType>> &obj) {
+        ElementType netType, inType, outType;
+        InputShape shapes;
+        AxisType axis;
+        TargetDevice targetDevice;
+        Config config;
+        std::tie(netType, inType, outType, shapes, axis, targetDevice, config) = obj.param;
+        std::ostringstream result;
+        result << "NetType=" << netType << "_";
+        result << "InType=" << inType << "_";
+        result << "OutType=" << outType << "_";
+        result << "IS=" << CommonTestUtils::partialShape2str({shapes.first}) << "_";
+        result << "TS=";
+        for (const auto& item : shapes.second) {
+            result << CommonTestUtils::vec2str(item) << "_";
+        }
+        result << "Axis=" << axis << "_";
+        result << "Device=" << targetDevice;
+        return result.str();
+    }
 protected:
-    void SetUp() override;
+    void SetUp() override {
+        InputShape shapes;
+        ElementType ngPrc;
+        AxisType axis;
+        std::tie(ngPrc, inType, outType, shapes, axis, targetDevice, configuration) = this->GetParam();
+        init_input_shapes({shapes});
+        const auto params = ngraph::builder::makeDynamicParams(ngPrc, inputDynamicShapes);
+        const auto paramOuts =
+            ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+        const auto softMax = std::make_shared<SoftmaxOpType>(paramOuts.at(0), axis);
+        const ngraph::ResultVector results{std::make_shared<ngraph::opset8::Result>(softMax)};
+        // TODO: This workaround is needed as there is no full support for f16 type in the reference implementation
+        if (ngPrc == element::Type_t::f16) {
+            abs_threshold = 0.005;
+        }
+        function = std::make_shared<ngraph::Function>(results, params, "softMax");
+    }
 };
+}  // namespace aux
+using SoftMax1LayerTest = aux::SoftMaxLayerTestBase<size_t, ngraph::opset1::Softmax>;
+using SoftMax8LayerTest = aux::SoftMaxLayerTestBase<int64_t, ngraph::opset8::Softmax>;
+using SoftMaxLayerTest = SoftMax1LayerTest;
 } // namespace subgraph
 } // namespace test
 } // namespace ov
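
The two aliases differ in AxisType because the op signatures differ; this is what lets the opset-8 suites exercise negative axes:

    // AxisType mirrors the op constructors:
    //   ngraph::opset1::Softmax(arg, size_t axis)   // non-negative axes only
    //   ngraph::opset8::Softmax(arg, int64_t axis)  // accepts [-rank, rank)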

View File

@@ -1,57 +0,0 @@
-// Copyright (C) 2018-2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-#include "ngraph_functions/builders.hpp"
-#include "common_test_utils/common_utils.hpp"
-#include "shared_test_classes/single_layer/softmax.hpp"
-namespace ov {
-namespace test {
-namespace subgraph {
-std::string SoftMaxLayerTest::getTestCaseName(const testing::TestParamInfo<SoftMaxTestParams>& obj) {
-    ElementType netType, inType, outType;
-    InputShape shapes;
-    size_t axis;
-    TargetDevice targetDevice;
-    Config config;
-    std::tie(netType, inType, outType, shapes, axis, targetDevice, config) = obj.param;
-    std::ostringstream result;
-    result << "NetType=" << netType << "_";
-    result << "InType=" << inType << "_";
-    result << "OutType=" << outType << "_";
-    result << "IS=" << CommonTestUtils::partialShape2str({shapes.first}) << "_";
-    result << "TS=";
-    for (const auto& item : shapes.second) {
-        result << CommonTestUtils::vec2str(item) << "_";
-    }
-    result << "Axis=" << axis << "_";
-    result << "Device=" << targetDevice;
-    return result.str();
-}
-void SoftMaxLayerTest::SetUp() {
-    InputShape shapes;
-    ElementType ngPrc;
-    size_t axis;
-    std::tie(ngPrc, inType, outType, shapes, axis, targetDevice, configuration) = GetParam();
-    init_input_shapes({shapes});
-    const auto params = ngraph::builder::makeDynamicParams(ngPrc, inputDynamicShapes);
-    const auto paramOuts =
-        ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
-    const auto softMax = std::make_shared<ngraph::opset1::Softmax>(paramOuts.at(0), axis);
-    const ngraph::ResultVector results {std::make_shared<ngraph::opset1::Result>(softMax)};
-    function = std::make_shared<ngraph::Function>(results, params, "softMax");
-}
-} // namespace subgraph
-} // namespace test
-} // namespace ov