[CPU] Dynamism support for EmbeddingSegmentsSum & EmbeddingBagOffsetSum & EmbeddingBagPackedSum nodes (#8387)

This commit is contained in:
Edward Shogulin 2021-11-29 12:28:16 +03:00 committed by GitHub
parent c8d5b20c95
commit e2ed99a93e
11 changed files with 566 additions and 35 deletions


@@ -13,10 +13,6 @@ using namespace InferenceEngine;
bool MKLDNNEmbeddingBagOffsetSumNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (isDynamicNgraphNode(op)) {
errorMessage = "Doesn't support op with dynamic shapes";
return false;
}
const auto embBagOffsetSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingBagOffsetsSum>(op);
if (!embBagOffsetSumOp) {
errorMessage = "Node is not an instance of the EmbeddingBagOffsetsSum operation from opset v3.";
@@ -35,14 +31,11 @@ MKLDNNEmbeddingBagOffsetSumNode::MKLDNNEmbeddingBagOffsetSumNode(const std::shar
IE_THROW(NotImplemented) << errorMessage;
}
if (op->get_input_shape(INDICES_IDX).size() != 1)
IE_THROW() << "'" << _layerName << "' layer has indices data with invalid shape.";
if (getInputShapeAtPort(INDICES_IDX).getRank() != 1ul)
IE_THROW() << "'" << _layerName << "' layer has indices data with invalid rank.";
if (op->get_input_shape(OFFSETS_IDX).size() != 1)
IE_THROW() << "'" << _layerName << "' layer's offsets data has invalid shape.";
_indicesLen = op->get_input_shape(INDICES_IDX)[0];
_offsetsLen = op->get_input_shape(OFFSETS_IDX)[0];
if (getInputShapeAtPort(OFFSETS_IDX).getRank() != 1ul)
IE_THROW() << "'" << _layerName << "' layer's offsets data has invalid rank.";
}
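Note: the checks above switch from get_input_shape(...).size() to getInputShapeAtPort(...).getRank() because a dynamic input has no static shape at node-construction time, while its rank is still known. A minimal illustration with the ov shape classes (illustrative snippet, not part of the commit):

#include <openvino/core/partial_shape.hpp>

int main() {
    // A 1-D indices input whose length is only known at inference time.
    ov::PartialShape indicesShape{ov::Dimension::dynamic()};
    bool lengthKnown = indicesShape[0].is_static();            // false: no static dim
    bool rankIsOne = indicesShape.rank().is_static() &&
                     indicesShape.rank().get_length() == 1;    // true: rank is fixed
    return (rankIsOne && !lengthKnown) ? 0 : 1;
}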
void MKLDNNEmbeddingBagOffsetSumNode::initSupportedPrimitiveDescriptors() {
@@ -77,6 +70,20 @@ void MKLDNNEmbeddingBagOffsetSumNode::initSupportedPrimitiveDescriptors() {
addSupportedPrimDesc(inDataConfigurators, {{LayoutType::ncsp, inDataPrecision}}, impl_desc_type::ref_any);
}
void MKLDNNEmbeddingBagOffsetSumNode::createPrimitive() {
if (inputShapesDefined()) {
if (needPrepareParams())
prepareParams();
updateLastInputDims();
}
}
void MKLDNNEmbeddingBagOffsetSumNode::prepareParams() {
_indicesLen = getParentEdgesAtPort(INDICES_IDX)[0]->getMemory().getStaticDims()[0];
_offsetsLen = getParentEdgesAtPort(OFFSETS_IDX)[0]->getMemory().getStaticDims()[0];
MKLDNNEmbeddingBagSumNode::prepareParams(getParentEdgesAtPort(EMB_TABLE_IDX)[0]->getMemory().getStaticDims());
}
void MKLDNNEmbeddingBagOffsetSumNode::initFromInputs() {
indicesData_ = reinterpret_cast<const int *>(getParentEdgeAt(INDICES_IDX)->getMemoryPtr()->GetPtr());
offsetsData_ = reinterpret_cast<const int *>(getParentEdgeAt(OFFSETS_IDX)->getMemoryPtr()->GetPtr());
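Note: the createPrimitive()/prepareParams() split above follows the plugin's dynamic-shape pattern: scalars that depend on input dimensions (_indicesLen, _offsetsLen and, in the base class, _embDepth) are recomputed from the runtime memory dims whenever they change, instead of being fixed from the ngraph op in the constructor. A rough sketch of the assumed call order (hypothetical stand-ins, not the plugin's actual base-class code):

#include <mkldnn.hpp>

// Hypothetical stand-in for the hooks this commit implements on the node.
struct NodeHooks {
    virtual bool inputShapesDefined() const = 0;  // are all input dims already known?
    virtual bool needPrepareParams() const = 0;   // did the dims change since last infer?
    virtual void prepareParams() = 0;             // recompute _indicesLen/_offsetsLen/_embDepth
    virtual void updateLastInputDims() = 0;
    virtual void execute(mkldnn::stream strm) = 0;
    virtual void executeDynamicImpl(mkldnn::stream strm) = 0;
    virtual ~NodeHooks() = default;
};

// Assumed static path: everything can be prepared once at createPrimitive() time.
void createPrimitiveLike(NodeHooks& node) {
    if (node.inputShapesDefined()) {
        if (node.needPrepareParams())
            node.prepareParams();
        node.updateLastInputDims();
    }
}

// Assumed dynamic path: the same hooks are re-run per inference once the concrete
// dims are known; executeDynamicImpl() here simply forwards to execute().
void inferDynamicLike(NodeHooks& node, mkldnn::stream strm) {
    if (node.needPrepareParams())
        node.prepareParams();
    node.executeDynamicImpl(strm);
}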


@@ -19,12 +19,16 @@ public:
void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void createPrimitive() override {};
void createPrimitive() override;
void execute(mkldnn::stream strm) override;
bool created() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
protected:
void prepareParams() override;
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
private:
void initFromInputs() override;
void getIndices(int embIndex, const int*& indices, size_t& size, int& weightsIdx, bool& withWeight) override;


@@ -13,10 +13,6 @@ using namespace InferenceEngine;
bool MKLDNNEmbeddingBagPackedSumNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (isDynamicNgraphNode(op)) {
errorMessage = "Doesn't support op with dynamic shapes";
return false;
}
const auto embBagPackedSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingBagPackedSum>(op);
if (!embBagPackedSumOp) {
errorMessage = "Node is not an instance of the EmbeddingBagPackedSum operation from opset v3.";
@@ -35,10 +31,8 @@ MKLDNNEmbeddingBagPackedSumNode::MKLDNNEmbeddingBagPackedSumNode(const std::shar
IE_THROW(NotImplemented) << errorMessage;
}
if (op->get_input_shape(INDICES_IDX).size() != 2)
IE_THROW() << "'" << _layerName << "' layer has indices data with invalid shape.";
_batch = op->get_input_shape(INDICES_IDX)[0];
_indicesPerBag = op->get_input_shape(INDICES_IDX)[1];
if (getInputShapeAtPort(INDICES_IDX).getRank() != 2ul)
IE_THROW() << "'" << _layerName << "' layer has indices data with invalid rank.";
}
void MKLDNNEmbeddingBagPackedSumNode::initSupportedPrimitiveDescriptors() {
@@ -70,6 +64,20 @@ void MKLDNNEmbeddingBagPackedSumNode::initSupportedPrimitiveDescriptors() {
addSupportedPrimDesc(inDataConfigurators, {{LayoutType::ncsp, inDataPrecision}}, impl_desc_type::ref_any);
}
void MKLDNNEmbeddingBagPackedSumNode::createPrimitive() {
if (inputShapesDefined()) {
if (needPrepareParams())
prepareParams();
updateLastInputDims();
}
}
void MKLDNNEmbeddingBagPackedSumNode::prepareParams() {
_batch = getParentEdgesAtPort(INDICES_IDX)[0]->getMemory().getStaticDims()[0];
_indicesPerBag = getParentEdgesAtPort(INDICES_IDX)[0]->getMemory().getStaticDims()[1];
MKLDNNEmbeddingBagSumNode::prepareParams(getParentEdgesAtPort(EMB_TABLE_IDX)[0]->getMemory().getStaticDims());
}
void MKLDNNEmbeddingBagPackedSumNode::initFromInputs() {
_indices = reinterpret_cast<const int *>(getParentEdgeAt(INDICES_IDX)->getMemoryPtr()->GetPtr());
}
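Note: for the packed variant the 2-D indices input drives both scalars recomputed in prepareParams(). A small worked example with made-up dims (illustrative, not taken from the commit):

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
    std::vector<size_t> indicesDims{3, 4};     // {batch, indicesPerBag}
    std::vector<size_t> embTableDims{100, 8};  // {vocab, embDepth}

    size_t batch         = indicesDims[0];     // -> _batch = 3
    size_t indicesPerBag = indicesDims[1];     // -> _indicesPerBag = 4
    size_t embDepth      = embTableDims[1];    // -> _embDepth = 8

    // Each of the 3 bags sums 4 embedding rows of depth 8, so the output is {3, 8}.
    assert(batch == 3 && indicesPerBag == 4 && embDepth == 8);
    return 0;
}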


@@ -19,12 +19,16 @@ public:
void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void createPrimitive() override {};
void createPrimitive() override;
void execute(mkldnn::stream strm) override;
bool created() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
protected:
void prepareParams() override;
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
private:
void initFromInputs() override;
void getIndices(int embIndex, const int*& indices, size_t& size, int& weightsIdx, bool& withWeight) override;


@@ -34,11 +34,12 @@ MKLDNNEmbeddingBagSumNode::MKLDNNEmbeddingBagSumNode(
if (op->get_input_shape(PER_SAMPLE_WEIGHTS_IDX) != op->get_input_shape(INDICES_IDX))
IE_THROW() << logPrefix << "must have equal shapes for indices and per_sample_weights inputs.";
}
}
const auto& inDataDims = op->get_input_shape(EMB_TABLE_IDX);
void MKLDNNEmbeddingBagSumNode::prepareParams(const VectorDims& indexStaticShape) {
_embDepth = 1lu;
for (size_t i = 1lu; i < inDataDims.size(); i++) {
_embDepth *= inDataDims[i];
for (size_t i = 1lu; i < indexStaticShape.size(); i++) {
_embDepth *= indexStaticShape[i];
}
}
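Note: the base-class prepareParams() above just recomputes the element count of one embedding row from the table's runtime dims; despite the parameter name, every caller in this commit passes the EMB_TABLE_IDX dims. A self-contained equivalent with made-up dims (illustrative):

#include <cstddef>
#include <vector>

using VectorDims = std::vector<size_t>;

// _embDepth is the product of every table dimension except the first (the vocabulary size).
size_t embDepthOf(const VectorDims& embTableDims) {
    size_t depth = 1;
    for (size_t i = 1; i < embTableDims.size(); ++i)
        depth *= embTableDims[i];
    return depth;
}
// e.g. embDepthOf({100, 4, 16}) == 64, embDepthOf({100, 35}) == 35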


@@ -35,6 +35,8 @@ protected:
int& weightsIdx,
bool& withWeights) = 0;
void prepareParams(const VectorDims& indexStaticShape);
template<typename T>
void processData(const T* srcData, const T* weightsData, T* dstData,
const InferenceEngine::SizeVector& inDataDims, const InferenceEngine::SizeVector& outDataDims);


@@ -11,12 +11,16 @@
using namespace MKLDNNPlugin;
using namespace InferenceEngine;
void MKLDNNEmbeddingSegmentsSumNode::createPrimitive() {
if (inputShapesDefined()) {
if (needPrepareParams())
prepareParams();
updateLastInputDims();
}
}
bool MKLDNNEmbeddingSegmentsSumNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (isDynamicNgraphNode(op)) {
errorMessage = "Doesn't support op with dynamic shapes";
return false;
}
const auto embBagSegSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingSegmentsSum>(op);
if (!embBagSegSumOp) {
errorMessage = "Node is not an instance of the EmbeddingSegmentsSum operation from opset v3.";
@@ -36,13 +40,13 @@ MKLDNNEmbeddingSegmentsSumNode::MKLDNNEmbeddingSegmentsSumNode(const std::shared
}
std::string errPrefix = std::string("EmbeddingSegmentsSum layer with name '") + _layerName + "' ";
if (op->get_input_shape(INDICES_IDX).size() != 1)
IE_THROW() << errPrefix << "has indices data with invalid shape: "
<< op->get_input_shape(INDICES_IDX).size();
if (getInputShapeAtPort(INDICES_IDX).getRank() != 1ul)
IE_THROW() << errPrefix << "has indices data with invalid rank: "
<< getInputShapeAtPort(INDICES_IDX).getRank();
if (op->get_input_shape(SEGMENT_ID_IDX).size() != 1)
IE_THROW() << errPrefix << "has invalid segmentID data shape: "
<< op->get_input_shape(SEGMENT_ID_IDX).size();
if (getInputShapeAtPort(SEGMENT_ID_IDX).getRank() != 1ul)
IE_THROW() << errPrefix << "has invalid segmentID data rank: "
<< getInputShapeAtPort(SEGMENT_ID_IDX).getRank();
}
void MKLDNNEmbeddingSegmentsSumNode::initSupportedPrimitiveDescriptors() {
@@ -78,6 +82,10 @@ void MKLDNNEmbeddingSegmentsSumNode::initSupportedPrimitiveDescriptors() {
addSupportedPrimDesc(inDataConfigurators, {{LayoutType::ncsp, inDataPrecision}}, impl_desc_type::ref_any);
}
void MKLDNNEmbeddingSegmentsSumNode::prepareParams() {
MKLDNNEmbeddingBagSumNode::prepareParams(getParentEdgesAtPort(EMB_TABLE_IDX)[0]->getMemory().getStaticDims());
}
void MKLDNNEmbeddingSegmentsSumNode::initFromInputs() {
indices_ = reinterpret_cast<const int *>(getParentEdgeAt(INDICES_IDX)->getMemoryPtr()->GetPtr());
indicesSize_ = getParentEdgeAt(INDICES_IDX)->getMemory().GetShape().getElementsCount();
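Note: for EmbeddingSegmentsSum the output's leading dimension is num_segments, which is itself an input of the op, so prepareParams() only caches what depends on the embedding table while the indices pointer and element count are re-read per inference in initFromInputs(). A sketch of the resulting output dims with made-up values (illustrative):

#include <cstddef>
#include <vector>

int main() {
    std::vector<size_t> embTableDims{10, 4, 16};  // -> _embDepth = 4 * 16 = 64
    size_t numSegments = 5;                       // read from the num_segments input

    std::vector<size_t> outDims{numSegments};
    outDims.insert(outDims.end(), embTableDims.begin() + 1, embTableDims.end());
    // outDims == {5, 4, 16}
    return 0;
}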


@@ -19,12 +19,16 @@ public:
void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void createPrimitive() override {};
void createPrimitive() override;
void execute(mkldnn::stream strm) override;
bool created() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
protected:
void prepareParams() override;
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
private:
void initFromInputs() override;
void getIndices(int embIndex, const int*& indices, size_t& size, int& weightsIdx, bool& withWeight) override;


@@ -0,0 +1,168 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <sstream>
#include <vector>
#include <openvino/core/partial_shape.hpp>
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
typedef std::tuple<
InputShape, // input_shapes
std::vector<size_t>, // indices
std::vector<size_t>, // offsets
size_t, // default_index
bool, // with_weights
bool // with_def_index
> embeddingBagOffsetsSumParams;
typedef std::tuple<
embeddingBagOffsetsSumParams,
ElementType, // embedding table
ElementType, // indices
LayerTestsUtils::TargetDevice> embeddingBagOffsetsSumLayerTestParamsSet;
class EmbeddingBagOffsetsSumLayerCPUTest :
public testing::WithParamInterface<embeddingBagOffsetsSumLayerTestParamsSet>,
virtual public SubgraphBaseTest,
public CPUTestsBase {
public:
static std::string getTestCaseName(const testing::TestParamInfo<embeddingBagOffsetsSumLayerTestParamsSet>& obj) {
embeddingBagOffsetsSumParams params;
ElementType netPrecision, indPrecision;
std::string targetDevice;
std::tie(params, netPrecision, indPrecision, targetDevice) = obj.param;
InputShape inputShapes;
std::vector<size_t> indices, offsets;
size_t defaultIndex;
bool withWeights, withDefIndex;
std::tie(inputShapes, indices, offsets, defaultIndex, withWeights, withDefIndex) = params;
std::ostringstream result;
result << "IS=" << inputShapes << "_";
result << "I" << CommonTestUtils::vec2str(indices) << "_";
result << "O" << CommonTestUtils::vec2str(offsets) << "_";
result << "DI" << defaultIndex << "_";
result << "WW" << withWeights << "_";
result << "WDI" << withDefIndex << "_";
result << "netPRC=" << netPrecision << "_";
result << "indPRC=" << indPrecision << "_";
result << "targetDevice=" << targetDevice;
return result.str();
}
void SetUp() override {
embeddingBagOffsetsSumParams embParams;
ElementType indPrecision;
std::tie(embParams, inType, indPrecision, targetDevice) = this->GetParam();
InputShape inputShapes;
std::vector<size_t> indices, offsets;
bool withWeights, withDefIndex;
size_t defaultIndex;
std::tie(inputShapes, indices, offsets, defaultIndex, withWeights, withDefIndex) = embParams;
selectedType = makeSelectedTypeStr("ref", inType);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({ inputShapes });
auto emb_table_node = std::make_shared<ngraph::opset1::Parameter>(inType, inputShapes.first);
ngraph::ParameterVector params = {emb_table_node};
auto embBag = std::dynamic_pointer_cast<ngraph::opset3::EmbeddingBagOffsetsSum>(ngraph::builder::makeEmbeddingBagOffsetsSum(
inType,
indPrecision,
emb_table_node,
indices,
offsets,
defaultIndex,
withWeights,
withDefIndex));
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(embBag)};
function = std::make_shared<ngraph::Function>(results, params, "embeddingBagOffsetsSum");
}
};
TEST_P(EmbeddingBagOffsetsSumLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
CheckPluginRelatedResults(executableNetwork, "embeddingBagOffsetsSum");
}
namespace {
const std::vector<ElementType> netPrecisions = {
ElementType::f32,
ElementType::i32,
ElementType::u8
};
const std::vector<ElementType> indPrecisions = {
ElementType::i64,
ElementType::i32
};
const std::vector<InputShape> input_shapes = {
// dynamic input shapes
{
// input model dynamic shapes
{ov::Dimension::dynamic(), ov::Dimension::dynamic()},
// input tensor shapes
{{5, 6}, {10, 35}}
},
{
// input model dynamic shapes
{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()},
// input tensor shapes
{{5, 4, 16}, {10, 12, 8}}
},
{
// input model dynamic shapes with limits
{{5, 10}, {6, 35}, {4, 8}},
// input tensor shapes
{{5, 6, 4}, {10, 35, 8}, {5, 6, 4}}
},
// static shapes
{{5, 6}, {{5, 6}}},
{{10, 35}, {{10, 35}}},
{{5, 4, 16}, {{5, 4, 16}}},
};
const std::vector<std::vector<size_t>> indices =
{{0, 1, 2, 2, 3}, {4, 4, 3, 1, 0}, {1, 2, 1, 2, 1, 2, 1, 2, 1, 2}};
const std::vector<std::vector<size_t>> offsets = {{0, 2}, {0, 0, 2, 2}, {2, 4}};
const std::vector<size_t> default_index = {0, 4};
const std::vector<bool> with_weights = {false, true};
const std::vector<bool> with_default_index = {false, true};
const auto embBagOffsetSumArgSet = ::testing::Combine(
::testing::ValuesIn(input_shapes),
::testing::ValuesIn(indices),
::testing::ValuesIn(offsets),
::testing::ValuesIn(default_index),
::testing::ValuesIn(with_weights),
::testing::ValuesIn(with_default_index)
);
INSTANTIATE_TEST_SUITE_P(smoke, EmbeddingBagOffsetsSumLayerCPUTest,
::testing::Combine(
embBagOffsetSumArgSet,
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(indPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
EmbeddingBagOffsetsSumLayerCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions
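Note: each input_shapes entry in the new tests pairs the shape the model is built with (possibly dynamic) against the concrete tensor shapes fed on successive inferences, which is what exercises the prepareParams() re-entry between infer requests. A small sketch assuming InputShape is a PartialShape/Shape-vector pair (assumption for illustration only):

#include <openvino/core/partial_shape.hpp>
#include <openvino/core/shape.hpp>
#include <utility>
#include <vector>

// Assumed layout of the test infrastructure's InputShape alias.
using InputShapeLike = std::pair<ov::PartialShape, std::vector<ov::Shape>>;

// Mirrors the first dynamic entry above: the model is compiled with {?, ?},
// then the test runs once with a {5, 6} tensor and once with {10, 35}.
const InputShapeLike example{
    {ov::Dimension::dynamic(), ov::Dimension::dynamic()},
    {ov::Shape{5, 6}, ov::Shape{10, 35}}
};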


@@ -0,0 +1,152 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <sstream>
#include <vector>
#include <openvino/core/partial_shape.hpp>
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
typedef std::tuple<
InputShape, // input_shapes
std::vector<std::vector<size_t>>, // indices
bool // with_weights
> embeddingBagPackedSumParams;
typedef std::tuple<
embeddingBagPackedSumParams,
ElementType, // embedding table
ElementType, // indices
LayerTestsUtils::TargetDevice> embeddingBagPackedSumLayerTestParamsSet;
class EmbeddingBagPackedSumLayerCPUTest :
public testing::WithParamInterface<embeddingBagPackedSumLayerTestParamsSet>,
virtual public SubgraphBaseTest,
public CPUTestsBase {
public:
static std::string getTestCaseName(const testing::TestParamInfo<embeddingBagPackedSumLayerTestParamsSet>& obj) {
embeddingBagPackedSumParams params;
ElementType netPrecision, indPrecision;
std::string targetDevice;
std::tie(params, netPrecision, indPrecision, targetDevice) = obj.param;
InputShape inputShapes;
std::vector<std::vector<size_t>> indices;
bool withWeights;
std::tie(inputShapes, indices, withWeights) = params;
std::ostringstream result;
result << "IS=" << inputShapes << "_";
result << "I" << CommonTestUtils::vec2str(indices) << "_";
result << "WW" << withWeights << "_";
result << "netPRC=" << netPrecision << "_";
result << "indPRC=" << indPrecision << "_";
result << "targetDevice=" << targetDevice;
return result.str();
}
protected:
void SetUp() override {
embeddingBagPackedSumParams embParams;
ElementType indPrecision;
std::tie(embParams, inType, indPrecision, targetDevice) = this->GetParam();
InputShape inputShapes;
std::vector<std::vector<size_t>> indices;
bool withWeights;
std::tie(inputShapes, indices, withWeights) = embParams;
selectedType = makeSelectedTypeStr("ref", inType);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({ inputShapes });
auto emb_table_node = std::make_shared<ngraph::opset1::Parameter>(inType, inputShapes.first);
ngraph::ParameterVector params = {emb_table_node};
auto embBag = std::dynamic_pointer_cast<ngraph::opset3::EmbeddingBagPackedSum>(ngraph::builder::makeEmbeddingBagPackedSum(
inType,
indPrecision,
emb_table_node,
indices,
withWeights));
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(embBag)};
function = std::make_shared<ngraph::Function>(results, params, "embeddingBagPackedSum");
}
};
TEST_P(EmbeddingBagPackedSumLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
CheckPluginRelatedResults(executableNetwork, "embeddingBagPackedSum");
}
namespace {
const std::vector<ElementType> netPrecisions = {
ElementType::f32,
ElementType::i32,
ElementType::u8
};
const std::vector<ElementType> indPrecisions = {
ElementType::i64,
ElementType::i32
};
const std::vector<InputShape> input_shapes = {
// dynamic input shapes
{
// input model dynamic shapes
{ov::Dimension::dynamic(), ov::Dimension::dynamic()},
// input tensor shapes
{{5, 6}, {10, 35}}
},
{
// input model dynamic shapes
{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()},
// input tensor shapes
{{5, 4, 16}, {10, 12, 8}}
},
{
// input model dynamic shapes with limits
{{5, 10}, {6, 35}, {4, 8}},
// input tensor shapes
{{5, 6, 4}, {10, 35, 8}, {5, 6, 4}}
},
// static shapes
{{5, 6}, {{5, 6}}},
{{10, 35}, {{10, 35}}},
{{5, 4, 16}, {{5, 4, 16}}},
};
const std::vector<std::vector<std::vector<size_t>>> indices =
{{{0, 1}, {2, 2}, {3, 4}}, {{4, 4, 3}, {1, 0, 2}}, {{1, 2, 1, 2}, {1, 2, 1, 2}}};
const std::vector<bool> with_weights = {false, true};
const auto embBagPackedSumArgSet = ::testing::Combine(
::testing::ValuesIn(input_shapes),
::testing::ValuesIn(indices),
::testing::ValuesIn(with_weights)
);
INSTANTIATE_TEST_SUITE_P(smoke, EmbeddingBagPackedSumLayerCPUTest,
::testing::Combine(
embBagPackedSumArgSet,
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(indPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
EmbeddingBagPackedSumLayerCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions


@@ -0,0 +1,173 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <sstream>
#include <vector>
#include <openvino/core/partial_shape.hpp>
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
typedef std::tuple<
InputShape, // input_shapes
std::vector<size_t>, // indices
std::vector<size_t>, // segment_ids
size_t, // num_segments
size_t, // default_index
bool, // with_weights
bool // with_def_index
> embeddingSegmentsSumParams;
typedef std::tuple<
embeddingSegmentsSumParams,
ElementType, // embedding table
ElementType, // indices
LayerTestsUtils::TargetDevice> embeddingSegmentsSumLayerTestParamsSet;
class EmbeddingSegmentsSumLayerCPUTest :
public testing::WithParamInterface<embeddingSegmentsSumLayerTestParamsSet>,
virtual public SubgraphBaseTest,
public CPUTestsBase {
public:
static std::string getTestCaseName(const testing::TestParamInfo<embeddingSegmentsSumLayerTestParamsSet>& obj) {
embeddingSegmentsSumParams params;
ElementType netPrecision, indPrecision;
std::string targetDevice;
std::tie(params, netPrecision, indPrecision, targetDevice) = obj.param;
InputShape inputShapes;
std::vector<size_t> indices, segmentIds;
size_t numSegments, defaultIndex;
bool withWeights, withDefIndex;
std::tie(inputShapes, indices, segmentIds, numSegments, defaultIndex, withWeights, withDefIndex) = params;
std::ostringstream result;
result << "IS=" << inputShapes << "_";
result << "I" << CommonTestUtils::vec2str(indices) << "_";
result << "SI" << CommonTestUtils::vec2str(segmentIds) << "_";
result << "NS" << numSegments << "_";
result << "DI" << defaultIndex << "_";
result << "WW" << withWeights << "_";
result << "WDI" << withDefIndex << "_";
result << "netPRC=" << netPrecision << "_";
result << "indPRC=" << indPrecision << "_";
result << "targetDevice=" << targetDevice;
return result.str();
}
protected:
void SetUp() override {
embeddingSegmentsSumParams embParams;
ElementType indPrecision;
std::tie(embParams, inType, indPrecision, targetDevice) = this->GetParam();
InputShape inputShapes;
std::vector<size_t> indices, segmentIds;
bool withWeights, withDefIndex;
size_t numSegments, defaultIndex;
std::tie(inputShapes, indices, segmentIds, numSegments, defaultIndex, withWeights, withDefIndex) = embParams;
selectedType = makeSelectedTypeStr("ref", inType);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({ inputShapes });
auto emb_table_node = std::make_shared<ngraph::opset1::Parameter>(inType, inputShapes.first);
ngraph::ParameterVector params = {emb_table_node};
auto embBag = std::dynamic_pointer_cast<ngraph::opset3::EmbeddingSegmentsSum>(ngraph::builder::makeEmbeddingSegmentsSum(
inType,
indPrecision,
emb_table_node,
indices,
segmentIds,
numSegments,
defaultIndex,
withWeights,
withDefIndex));
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(embBag)};
function = std::make_shared<ngraph::Function>(results, params, "embeddingSegmentsSum");
}
};
TEST_P(EmbeddingSegmentsSumLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
CheckPluginRelatedResults(executableNetwork, "embeddingSegmentsSum");
}
namespace {
const std::vector<ElementType> netPrecisions = {
ElementType::f32,
ElementType::i32,
ElementType::u8
};
const std::vector<ElementType> indPrecisions = {
ElementType::i64,
ElementType::i32
};
const std::vector<InputShape> input_shapes = {
// dynamic input shapes
{
// input model dynamic shapes
{ov::Dimension::dynamic(), ov::Dimension::dynamic()},
// input tensor shapes
{{5, 6}, {10, 35}}
},
{
// input model dynamic shapes
{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()},
// input tensor shapes
{{5, 4, 16}, {10, 12, 8}}
},
{
// input model dynamic shapes with limits
{{5, 10}, {6, 35}, {4, 8}},
// input tensor shapes
{{5, 6, 4}, {10, 35, 8}, {5, 6, 4}}
},
// static shapes
{{5, 6}, {{5, 6}}},
{{10, 35}, {{10, 35}}},
{{5, 4, 16}, {{5, 4, 16}}},
};
const std::vector<std::vector<size_t>> indices =
{{0, 1, 2, 2, 3}, {4, 4, 3, 1, 2}};
const std::vector<std::vector<size_t>> segment_ids = {{0, 1, 2, 3, 4}, {0, 0, 2, 2, 4}};
const std::vector<size_t> num_segments = {5, 7};
const std::vector<size_t> default_index = {0, 4};
const std::vector<bool> with_weights = {false, true};
const std::vector<bool> with_default_index = {false, true};
const auto embSegmentsSumArgSet = ::testing::Combine(
::testing::ValuesIn(input_shapes),
::testing::ValuesIn(indices),
::testing::ValuesIn(segment_ids),
::testing::ValuesIn(num_segments),
::testing::ValuesIn(default_index),
::testing::ValuesIn(with_weights),
::testing::ValuesIn(with_default_index)
);
INSTANTIATE_TEST_SUITE_P(smoke, EmbeddingSegmentsSumLayerCPUTest,
::testing::Combine(
embSegmentsSumArgSet,
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(indPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
EmbeddingSegmentsSumLayerCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions