Merge remote-tracking branch 'upstream/master' into layer_test_common
commit 759326e50d
@@ -65,6 +65,7 @@ const InferenceEngine::details::caseless_unordered_map<std::string, Type> type_t
         { "Reshape", Reshape },
         { "Squeeze", Reshape },
         { "Unsqueeze", Reshape },
+        { "ShapeOf", ShapeOf },
         { "Softmax", Softmax },
         { "Reorder", Reorder },
         { "BatchToSpace", BatchToSpace },
@@ -225,6 +226,8 @@ std::string NameFromType(const Type type) {
         return "StridedSlice";
     case Reshape:
         return "Reshape";
+    case ShapeOf:
+        return "ShapeOf";
     case Tile:
         return "Tile";
     case ROIAlign:
@@ -33,6 +33,7 @@ enum Type {
     Eltwise,
     MatMul,
     Reshape,
+    ShapeOf,
     Tile,
     ROIAlign,
     ROIPooling,
@@ -1209,12 +1209,51 @@ bool MKLDNNGraph::InsertNode(MKLDNNNodePtr parent, MKLDNNNodePtr child, MKLDNNNo
 void MKLDNNGraph::EnforceBF16() {
     // Floating point parts of FP32 + INT8 or FP32 + BIN mixed precision models will be executed in BF16 precision
     // only if the enforceBF16 flag was set manually, because current performance is not good enough to enable it by default
-    if (implication(isQuantized(), config.manualEnforceBF16)) {
-        for (auto &node : graphNodes) {
+    if (!implication(isQuantized(), config.manualEnforceBF16))
+        return;
+    /* list of node types that must be forced to be executed in BF16 precision
+     * because of performance gains */
+    static const std::unordered_set<Type, std::hash<int>> significantNodes { // std::hash<int> is necessary for old compilers (defect in the C++11 standard)
+        Convolution,    // conv nets
+        FullyConnected, // conv / bert nets
+        RNNCell,        // recurrent nets
+        RNNSeq,         // recurrent nets
+        MatMul,         // bert nets
+        ROIPooling,     // object detection nets
+        Interpolate,    // super resolution nets
+    };
+
+    std::function<void(const MKLDNNNodePtr&, std::unordered_set<MKLDNNNodePtr>& skipNodes)> searchForNodesToSkip;
+    searchForNodesToSkip = [&](const MKLDNNNodePtr& node, std::unordered_set<MKLDNNNodePtr>& skipNodes) -> void {
+        for (size_t i = 0; i < node->getParentEdges().size(); i++) {
+            const auto& parent = node->getParentEdgeAt(i)->getParent();
+            if (significantNodes.count(parent->getType())) // stop at significant nodes
+                continue;
+
+            const auto res = skipNodes.insert(parent);
+            if (res.second) // node not visited yet
+                searchForNodesToSkip(parent, skipNodes);
+        }
+    };
+
+    /* Skip BF16 enforcement for the tail of the graph by forming a set of nodes to skip.
+     * Necessary to maintain accuracy.
+     * Experiments show zero performance impact on average */
+    std::unordered_set<MKLDNNNodePtr> nodesToSkip;
+    // starting from output nodes
+    for (const auto& entry : outputNodesMap) {
+        const auto& node = entry.second;
+        searchForNodesToSkip(node, nodesToSkip);
+    }
+
+    for (const auto& node : graphNodes) {
+        if (nodesToSkip.count(node) && !node->enforceBF16evenForGraphTail)
+            continue;
+
         if (node->getType() != Input && node->getType() != Output) {
             for (size_t i = 0; i < node->getOriginalInputsNumber(); i++) {
-                auto &parent = node->getParentEdgesAtPort(i)[0]->getParent();
-                if (!(parent->getType() == Input && parent->isConstant()) && // exclude nodes after Constant Inputs
+                const auto &parent = node->getParentEdgesAtPort(i)[0]->getParent();
+                if (!(parent->getType() == Input && parent->isConstant()) && // exclude skipNodes after Constant Inputs
                     !(parent->getType() == Input && node->getType() == Eltwise) && // exclude Eltwise after Input since it supports conversion to BF16
                     node->getOriginalInputPrecisionAtPort(i) == Precision::FP32)
                     node->setOriginalInputPrecisionAtPort(i, Precision::BF16);
@@ -1226,7 +1265,6 @@ void MKLDNNGraph::EnforceBF16() {
             }
         }
     }
-    }
 }

 std::shared_ptr<ngraph::Function> MKLDNNGraph::dump() const {
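A note on the implication() helper that gates this pass: it is assumed here to be the plugin's usual logical-implication utility, so the rewritten early return is equivalent to the old positive check. A minimal hedged sketch:

#include <cassert>

// Assumed semantics of the implication() helper used above:
// implication(a, b) == (a -> b) == (!a || b).
constexpr bool implication(bool cause, bool cond) {
    return !cause || cond;
}

int main() {
    // EnforceBF16 now bails out early when the model is quantized (INT8/BIN
    // parts present) and manualEnforceBF16 was NOT set:
    assert(!implication(true, false));  // quantized, flag unset -> return
    assert(implication(true, true));    // quantized, flag set   -> enforce
    assert(implication(false, false));  // pure FP32 model       -> enforce
    return 0;
}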
@@ -159,6 +159,12 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::en
             }
         }
     }
+
+    const auto it = rtInfo.find("enforceBF16evenForGraphTail");
+    if (it != rtInfo.end()) {
+        if (const auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<int64_t>>(it->second))
+            enforceBF16evenForGraphTail = value->get();
+    }
 }

 MKLDNNNode::MKLDNNNode(const std::string& type, const std::string& name, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)
@@ -593,6 +593,7 @@ protected:
     std::vector <impl_desc_type> implPriorities;
     std::vector <mkldnn::memory::format_tag> inputMemoryFormatsFilter;
     std::vector <mkldnn::memory::format_tag> outputMemoryFormatsFilter;
+    bool enforceBF16evenForGraphTail = false;

     std::string originalLayers; // contains names of the original layers separated by comma
inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.cpp (new file, 79 lines)
@@ -0,0 +1,79 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "mkldnn_shapeof.h"
#include <ngraph/opsets/opset1.hpp>

using namespace MKLDNNPlugin;
using namespace InferenceEngine;

bool MKLDNNShapeOfNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
    try {
        if (!one_of(op->get_type_info(),
                ngraph::op::v0::ShapeOf::get_type_info_static(),
                ngraph::op::v3::ShapeOf::get_type_info_static())) {
            errorMessage = "Node is not an instance of ShapeOf from the operation set v1 or v3.";
            return false;
        }
    } catch (...) {
        return false;
    }
    return true;
}

MKLDNNShapeOfNode::MKLDNNShapeOfNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng,
        MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(op, eng, cache) {
    std::string errorMessage;
    if (isSupportedOperation(op, errorMessage)) {
        errorPrefix = "ShapeOf layer with name '" + getName() + "' ";
        if (op->get_input_partial_shape(0).size() == 0)
            IE_THROW() << errorPrefix << "gets unsupported input 0D tensor (scalar)";
    } else {
        IE_THROW(NotImplemented) << errorMessage;
    }
}

void MKLDNNShapeOfNode::getSupportedDescriptors() {
    if (!descs.empty())
        return;
    if (getParentEdges().size() != 1)
        IE_THROW() << errorPrefix << "has incorrect number of input edges: " << getParentEdges().size();
    if (getChildEdges().empty())
        IE_THROW() << errorPrefix << "has incorrect number of output edges: " << getChildEdges().size();
}

void MKLDNNShapeOfNode::initSupportedPrimitiveDescriptors() {
    if (!supportedPrimitiveDescriptors.empty())
        return;

    Precision precision = getOriginalInputPrecisionAtPort(0);

    const LayoutType dataFormats[4] = { LayoutType::ncsp, LayoutType::nspc, LayoutType::nCsp16c, LayoutType::nCsp8c };
    for (const auto &df : dataFormats) {
        addSupportedPrimDesc({{df, precision}},
                             {{LayoutType::ncsp, Precision::I32}},
                             impl_desc_type::ref);
    }
}

void MKLDNNShapeOfNode::execute(mkldnn::stream strm) {
    auto inPtr = getParentEdgeAt(0)->getMemoryPtr();
    auto outPtr = getChildEdgeAt(0)->getMemoryPtr();
    auto inDims = inPtr->getStaticDims();
    size_t dimsCount = inDims.size();
    if (outPtr->getStaticDims().size() != 1 || dimsCount != outPtr->getStaticDims()[0])
        IE_THROW() << errorPrefix << "has inconsistent input shape and output size";

    auto *dst = reinterpret_cast<int *>(getChildEdgeAt(0)->getMemoryPtr()->GetPtr());

    for (size_t i = 0; i < dimsCount; i++) {
        dst[i] = inDims[i];
    }
}

bool MKLDNNShapeOfNode::created() const {
    return getType() == ShapeOf;
}

REG_MKLDNN_PRIM_FOR(MKLDNNShapeOfNode, ShapeOf)
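To make the semantics of execute() concrete: the node copies the input tensor's dimensions into a 1-D I32 output whose length equals the input rank, which is also the shape that shapeInfer() in the header below reports. A hedged standalone sketch (plain C++, no plugin types):

#include <cassert>
#include <cstdint>
#include <vector>

// Standalone equivalent of what MKLDNNShapeOfNode::execute() writes:
// the input dims as a rank-length 1-D I32 tensor (hypothetical helper).
std::vector<int32_t> shape_of(const std::vector<size_t>& inDims) {
    return std::vector<int32_t>(inDims.begin(), inDims.end());
}

int main() {
    const auto out = shape_of({2, 3, 4});
    assert(out.size() == 3);                        // output length == input rank
    assert((out == std::vector<int32_t>{2, 3, 4})); // elements are the dims
    return 0;
}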
inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.h (new file, 39 lines)
@@ -0,0 +1,39 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <mkldnn_node.h>
#include <string>
#include <memory>
#include <vector>
#include <mkldnn_extension_utils.h>

namespace MKLDNNPlugin {

class MKLDNNShapeOfNode : public MKLDNNNode {
public:
    MKLDNNShapeOfNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);

    void getSupportedDescriptors() override;
    void initSupportedPrimitiveDescriptors() override;
    void createPrimitive() override {
        if (inputShapesDefined())
            updateLastInputDims();
    }
    void execute(mkldnn::stream strm) override;
    bool created() const override;
    bool needPrepareParams() const override { return false; }
    void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
    std::vector<VectorDims> shapeInfer() const override {
        return {VectorDims{getParentEdgesAtPort(0)[0]->getMemory().getStaticDims().size()}};
    }

    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;

private:
    std::string errorPrefix;
};

} // namespace MKLDNNPlugin
@@ -100,7 +100,7 @@ protected:
         // performance counters

         expectedPrecisions["Matmul_0"] = "BF16";
-        expectedPrecisions["Mul_1"] = "BF16";
+        expectedPrecisions["Mul_1"] = netPrecision.name(); // tail kept in FP32 precision
     }
 };
@@ -1,345 +1,345 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <shared_test_classes/single_layer/gather.hpp>
#include "ngraph_functions/builders.hpp"
#include "test_utils/cpu_test_utils.hpp"

using namespace InferenceEngine;
using namespace CPUTestUtils;

namespace CPULayerTestsDefinitions {

using inputShapesPair = std::pair<std::vector<ov::PartialShape>, std::vector<std::vector<ov::Shape>>>;

typedef std::tuple<
        inputShapesPair,             // Input shapes
        int64_t,                     // Axis
        int64_t,                     // Batch dims
        InferenceEngine::Precision,  // Network precision
        bool,                        // Is axis input constant
        std::string,                 // Device name
        CPUSpecificParams            // CPU specific params
> GatherLayerTestCPUParams;

class GatherLayerTestCPU : public testing::WithParamInterface<GatherLayerTestCPUParams>,
                           virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase {
public:
    static std::string getTestCaseName(testing::TestParamInfo<GatherLayerTestCPUParams> obj) {
        inputShapesPair inputShapes;
        int axis, batchDims;
        Precision netPrecision;
        std::string targetDevice;
        bool isAxisConstant;
        CPUSpecificParams cpuParams;
        std::tie(inputShapes, axis, batchDims, netPrecision, isAxisConstant, targetDevice, cpuParams) = obj.param;

        std::ostringstream result;
        result << "DynShapes=" << CommonTestUtils::partialShape2str(inputShapes.first) << "_";
        result << "StatShapes=" << CommonTestUtils::vec2str(inputShapes.second) << "_";
        result << "axis=" << axis << "_";
        result << "batchDims=" << batchDims << "_";
        result << "netPrc=" << netPrecision.name() << "_";
        result << "constAx=" << (isAxisConstant ? "True" : "False") << "_";
        result << "trgDev=" << targetDevice;
        result << CPUTestsBase::getTestCaseName(cpuParams);

        return result.str();
    }

protected:
    void SetUp() override {
        inputShapesPair inputShapes;
        int64_t batchDims;
        Precision netPrecision;
        CPUSpecificParams cpuParams;
        bool isAxisConstant = true;
        std::tie(inputShapes, axis, batchDims, netPrecision, isAxisConstant, targetDevice, cpuParams) = this->GetParam();

        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;

        selectedType = std::string("ref_any_") + netPrecision.name();

        targetStaticShapes.reserve(inputShapes.second.size());
        inputDynamicShapes.reserve(inputShapes.first.size());
        for (int i = 0; i < (isAxisConstant ? 2 : 3); i++) {
            if (inputShapes.second.size() > i)
                targetStaticShapes.push_back({inputShapes.second[i]});
            if (inputShapes.first.size() > i)
                inputDynamicShapes.push_back(inputShapes.first[i]);
        }
        const ov::Shape& inputDataShape = targetStaticShapes.front().front(), indicesShape = targetStaticShapes.front()[1];
        dataSrcRank = inputDataShape.size();

        const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
        ov::ParameterVector functionParams {
            ngraph::builder::makeParams(ngPrc, { {"data", inputDataShape} })[0],
            ngraph::builder::makeParams(ov::element::i32, { {"indices", indicesShape} })[0]
        };
        if (!isAxisConstant) {
            functionParams.push_back(ngraph::builder::makeParams(ov::element::i32, { {"axis", {1}} })[0]);
        }
        auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ov::op::v0::Parameter>(functionParams));
        std::shared_ptr<ov::Node> gatherNode;
        if (isAxisConstant) {
            gatherNode = std::make_shared<ov::op::v8::Gather>(paramOuts[0], paramOuts[1],
                    ov::op::v0::Constant::create(ov::element::i64, ov::Shape({}), { axis }), batchDims);
        } else {
            gatherNode = std::make_shared<ov::op::v8::Gather>(paramOuts[0], paramOuts[1], paramOuts[2], batchDims);
        }

        ov::ResultVector results{ std::make_shared<ov::op::v0::Result>(gatherNode) };
        function = std::make_shared<ov::Function>(results, functionParams, "Gather");
    }

    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &inputInfo) const override {
        if (inputInfo.name() == "indices") {
            const auto& td = inputInfo.getTensorDesc();
            size_t normAxis = axis < 0 ? axis + dataSrcRank : axis;
            const auto axDim = targetStaticShapes[index][0][normAxis];
            if (axDim == 1) {
                // Random generator cannot generate values in range [0; 0]
                int values[1] = { 0 };
                return FuncTestUtils::createAndFillBlobWithFloatArray<int32_t>(td, values, 1);
            } else {
                return FuncTestUtils::createAndFillBlob(td, axDim - 1, 0);
            }
        } else if (inputInfo.name() == "axis") {
            int values[1] = { static_cast<int32_t>(axis) };
            return FuncTestUtils::createAndFillBlobWithFloatArray<int32_t>(inputInfo.getTensorDesc(), values, 1);
        } else {
            return LayerTestsCommon::GenerateInput(inputInfo);
        }
    }

    int64_t axis = 0;
    int64_t dataSrcRank = 0;
};

TEST_P(GatherLayerTestCPU, CompareWithRefs) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    Run();
    CheckPluginRelatedResults(executableNetwork, "Gather");
}

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::BF16,
        InferenceEngine::Precision::I8
};

// 1D
const std::vector<inputShapesPair> staticInputShapes1D = {
    {
        {},
        { // Static shapes
            {{4}, {2, 3, 4}}
        }
    },
    {
        {},
        { // Static shapes
            {{4}, {1}}
        }
    },
    {
        {},
        { // Static shapes
            {{4}, {9}}
        }
    },
    {
        {},
        { // Static shapes
            {{5}, {5}}
        }
    }
};
const std::vector<inputShapesPair> dynamicInputShapes1D = {
    {
        { // Origin dynamic shapes
            {ov::Dimension(4, 6)}, {ov::Dimension(1, 10)}, {ov::Dimension(1, 2)}
        },
        { // Dynamic shapes instances
            {{4}, {1}, {1}},
            {{4}, {9}, {1}},
            {{5}, {5}, {1}}
        }
    }
};

INSTANTIATE_TEST_SUITE_P(smoke_StaticShape1D, GatherLayerTestCPU,
        ::testing::Combine(
            ::testing::ValuesIn(staticInputShapes1D),
            ::testing::Values(0),
            ::testing::Values(0),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(true),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::Values(CPUSpecificParams{})),
        GatherLayerTestCPU::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape1D, GatherLayerTestCPU,
        ::testing::Combine(
            ::testing::ValuesIn(dynamicInputShapes1D),
            ::testing::Values(0),
            ::testing::Values(0),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(true, false),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::Values(CPUSpecificParams{})),
        GatherLayerTestCPU::getTestCaseName);

// 2D
const std::vector<inputShapesPair> staticInputShapes2D = {
    {
        {},
        { // Static shapes
            {{4, 7}, {4, 55}}
        }
    },
    {
        {},
        { // Static shapes
            {{4, 17}, {4, 17}}
        }
    },
    {
        {},
        { // Static shapes
            {{4, 55}, {4, 7}}
        }
    }
};
const std::vector<inputShapesPair> dynamicInputShapes2D = {
    {
        { // Origin dynamic shapes
            {4, ov::Dimension(3, 99)},
            {4, ov::Dimension(3, 99)},
            {1}
        },
        { // Dynamic shapes instances
            {{4, 7}, {4, 55}, {1}},
            {{4, 55}, {4, 7}, {1}},
            {{4, 17}, {4, 17}, {1}}
        }
    }
};
const std::vector<inputShapesPair> dynamicInputShapes2Dv2 = {
    {
        { // Origin dynamic shapes
            {ov::Dimension(3, 99), ov::Dimension(3, 99)},
            {-1, ov::Dimension(3, 99)},
            {1}
        },
        { // Dynamic shapes instances
            {{4, 7}, {4, 55}, {1}},
            {{8, 55}, {5, 7}, {1}}
        }
    }
};

INSTANTIATE_TEST_SUITE_P(smoke_StaticShape2D, GatherLayerTestCPU,
        ::testing::Combine(
            ::testing::ValuesIn(staticInputShapes2D),
            ::testing::Values(1),
            ::testing::ValuesIn(std::vector<int64_t>{0, 1}),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(true),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::Values(CPUSpecificParams{})),
        GatherLayerTestCPU::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape2D, GatherLayerTestCPU,
        ::testing::Combine(
            ::testing::ValuesIn(dynamicInputShapes2D),
            ::testing::Values(1),
            ::testing::ValuesIn(std::vector<int64_t>{0, 1}),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(true, false),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::Values(CPUSpecificParams{})),
        GatherLayerTestCPU::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape2Dv2, GatherLayerTestCPU,
        ::testing::Combine(
            ::testing::ValuesIn(dynamicInputShapes2Dv2),
            ::testing::Values(0),
            ::testing::Values(0),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(true, false),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::Values(CPUSpecificParams{})),
        GatherLayerTestCPU::getTestCaseName);

// 4D
const std::vector<inputShapesPair> staticInputShapes4D = {
    {
        {},
        { // Static shapes
            {{4, 5, 6, 7}, {2, 5, 1}}
        }
    },
    {
        {},
        { // Static shapes
            {{10, 5, 6, 7}, {2, 5, 2}}
        }
    },
    {
        {},
        { // Static shapes
            {{16, 5, 6, 7}, {3, 5, 3}}
        }
    }
};
const std::vector<inputShapesPair> dynamicInputShapes4D = {
    {
        { // Origin dynamic shapes
            {ov::Dimension(4, 20), 5, 6, 7},
            {ov::Dimension(2, 4), 5, ov::Dimension(1, 4)},
            {1}
        },
        { // Dynamic shapes instances
            {{4, 5, 6, 7}, {2, 5, 1}, {1}},
            {{10, 5, 6, 7}, {2, 5, 2}, {1}},
            {{16, 5, 6, 7}, {3, 5, 3}, {1}}
        }
    },
    {
        { // Origin dynamic shapes
            {-1, -1, -1, -1}, {-1, -1, -1}, {1}
        },
        { // Dynamic shapes instances
            {{4, 5, 6, 4}, {2, 5, 16}, {1}},
            {{10, 5, 6, 8}, {2, 5, 24}, {1}}
        }
    }
};

INSTANTIATE_TEST_SUITE_P(smoke_StaticShape4D, GatherLayerTestCPU,
        ::testing::Combine(
            ::testing::ValuesIn(staticInputShapes4D),
            ::testing::ValuesIn(std::vector<int64_t>{0, 1, 2, -1}),
            ::testing::Values(0),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(true),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::Values(CPUSpecificParams{})),
        GatherLayerTestCPU::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_DynamicShape4D, GatherLayerTestCPU,
        ::testing::Combine(
            ::testing::ValuesIn(dynamicInputShapes4D),
            ::testing::ValuesIn(std::vector<int64_t>{0, 1, 2, -1}),
            ::testing::Values(0),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(true, false),
            ::testing::Values(CommonTestUtils::DEVICE_CPU),
            ::testing::Values(CPUSpecificParams{})),
        GatherLayerTestCPU::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions
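For readers scanning the shape tables above, a hedged illustration of how one inputShapesPair entry is organized (the types below are simplified stand-ins, not the real ov:: classes): the first element lists the dynamic (partial) shapes for data/indices/axis, the second lists the concrete static shape sets each test iteration runs with.

#include <cstddef>
#include <utility>
#include <vector>

// Illustrative stand-ins for ov::PartialShape / ov::Shape (hypothetical types).
using StaticShape = std::vector<size_t>;               // e.g. {4, 7}
using PartialDims = std::vector<std::pair<int, int>>;  // per-dim {min, max}

using inputShapesPairSketch =
    std::pair<std::vector<PartialDims>,                // dynamic shapes: data, indices, axis
              std::vector<std::vector<StaticShape>>>;  // per-iteration static instances

int main() {
    // Mirrors dynamicInputShapes2D above: data {4, [3..99]}, indices {4, [3..99]},
    // axis {1}; three static instantiations run against one compiled model.
    inputShapesPairSketch entry{
        {{{4, 4}, {3, 99}}, {{4, 4}, {3, 99}}, {{1, 1}}},
        {{{4, 7}, {4, 55}, {1}}, {{4, 55}, {4, 7}, {1}}, {{4, 17}, {4, 17}, {1}}}
    };
    return static_cast<int>(entry.second.size()); // 3 static shape sets
}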
@@ -0,0 +1,175 @@
//// Copyright (C) 2021 Intel Corporation
//// SPDX-License-Identifier: Apache-2.0
////
//
//#include "test_utils/cpu_test_utils.hpp"
//
//#include "ngraph_functions/builders.hpp"
//#include "ngraph_functions/utils/ngraph_helpers.hpp"
//
//using namespace InferenceEngine;
//using namespace CPUTestUtils;
//
//namespace CPULayerTestsDefinitions {
//typedef std::tuple<
//        std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>> // input shape
//> ShapeOfSpecificParams;
//
//typedef std::tuple<
//        ShapeOfSpecificParams,
//        InferenceEngine::Precision,    // Net precision
//        LayerTestsUtils::TargetDevice  // Device name
//> ShapeOfLayerTestParams;
//
//typedef std::tuple<
//        CPULayerTestsDefinitions::ShapeOfLayerTestParams,
//        CPUSpecificParams> ShapeOfLayerCPUTestParamsSet;
//
//class ShapeOfLayerCPUTest : public testing::WithParamInterface<ShapeOfLayerCPUTestParamsSet>,
//                            virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase {
//public:
//    static std::string getTestCaseName(testing::TestParamInfo<ShapeOfLayerCPUTestParamsSet> obj) {
//        CPULayerTestsDefinitions::ShapeOfLayerTestParams basicParamsSet;
//        CPUSpecificParams cpuParams;
//        std::tie(basicParamsSet, cpuParams) = obj.param;
//        std::string td;
//        Precision netPr;
//        std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>> shapes;
//
//        ShapeOfSpecificParams shapeOfPar;
//        std::tie(shapeOfPar, netPr, td) = basicParamsSet;
//        std::tie(shapes) = shapeOfPar;
//        std::ostringstream result;
//        result << "ShapeOfTest_";
//        result << std::to_string(obj.index) << "_";
//        result << "Prec=" << netPr.name() << "_";
//        result << CPUTestsBase::getTestCaseName(cpuParams) << "_";
//        result << "IS=";
//        for (const auto& shape : shapes.second) {
//            result << "(";
//            for (const auto& item : shape) {
//                result << CommonTestUtils::vec2str(item);
//            }
//            result << ")_";
//        }
//        return result.str();
//    }
//protected:
//    void SetUp() override {
//        CPULayerTestsDefinitions::ShapeOfLayerTestParams basicParamsSet;
//        CPUSpecificParams cpuParams;
//        std::tie(basicParamsSet, cpuParams) = this->GetParam();
//        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
//
//        CPULayerTestsDefinitions::ShapeOfSpecificParams shapeOfParams;
//        auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
//        std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>> shapes;
//        std::tie(shapeOfParams, netPrecision, targetDevice) = basicParamsSet;
//        inPrc = netPrecision;
//        outPrc = Precision::I32;
//        std::tie(shapes) = shapeOfParams;
//        targetStaticShapes = shapes.second;
//        inputDynamicShapes = shapes.first;
//
//        auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
//        auto param = ngraph::builder::makeParams(inType, {targetStaticShapes.front().front()});
//        auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::opset3::Parameter>(param));
//        auto shapeOf = std::make_shared<ngraph::opset3::ShapeOf>(paramOuts[0], ngraph::element::i32);
//        shapeOf->get_rt_info() = getCPUInfo();
//        selectedType = std::string("ref_") + inPrc.name();
//
//        const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(shapeOf)};
//        function = std::make_shared<ngraph::Function>(results, param, "ShapeOf");
//        functionRefs = ngraph::clone_function(*function);
//    }
//};
//
//TEST_P(ShapeOfLayerCPUTest, CompareWithRefs) {
//    SKIP_IF_CURRENT_TEST_IS_DISABLED()
//    Run();
//    CheckPluginRelatedResults(executableNetwork, "ShapeOf");
//}
//
//namespace {
//
///* CPU PARAMS */
//std::vector<CPUSpecificParams> filterCPUInfoForDevice(const size_t dimsCount = 3) {
//    std::vector<CPUSpecificParams> resCPUParams;
//    if (dimsCount == 5) {
//        resCPUParams.push_back(CPUSpecificParams{{nCdhw16c}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{nCdhw8c}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{ncdhw}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{ndhwc}, {x}, {}, {}});
//    } else if (dimsCount == 4) {
//        resCPUParams.push_back(CPUSpecificParams{{nChw16c}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{nChw8c}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{nchw}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{nhwc}, {x}, {}, {}});
//    } else {
//        resCPUParams.push_back(CPUSpecificParams{{nCw16c}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{nCw8c}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{abc}, {x}, {}, {}});
//        resCPUParams.push_back(CPUSpecificParams{{acb}, {x}, {}, {}});
//    }
//
//    return resCPUParams;
//}
//
//const std::vector<InferenceEngine::Precision> netPrecisions = {
//        InferenceEngine::Precision::FP32,
//        InferenceEngine::Precision::BF16,
//        InferenceEngine::Precision::I32,
//        InferenceEngine::Precision::I8
//};
//
//std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>>> inShapesDynamic3d = {
//        {{ngraph::PartialShape{-1, -1, -1}},
//         {{{ 8, 5, 4 }, { 8, 5, 3 }, { 8, 5, 2 }}}},
//        {{ngraph::PartialShape{-1, -1, -1}},
//         {{{ 1, 2, 4 }, { 1, 2, 3 }, { 1, 2, 2 }}}}
//};
//std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>>> inShapesDynamic4d = {
//        {{ngraph::PartialShape{-1, -1, -1, -1}},
//         {{{ 8, 5, 3, 4 }, { 8, 5, 3, 3 }, { 8, 5, 3, 2 }}}},
//        {{ngraph::PartialShape{-1, -1, -1, -1}},
//         {{{ 1, 2, 3, 4 }, { 1, 2, 3, 3 }, { 1, 2, 3, 2 }}}}
//};
//std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>>> inShapesDynamic5d = {
//        {{ngraph::PartialShape{-1, -1, -1, -1, -1}},
//         {{{ 8, 5, 3, 2, 4 }, { 8, 5, 3, 2, 3 }, { 8, 5, 3, 2, 2 }}}},
//        {{ngraph::PartialShape{-1, -1, -1, -1, -1}},
//         {{{ 1, 2, 3, 4, 4 }, { 1, 2, 3, 4, 3 }, { 1, 2, 3, 4, 2 }}}}
//};
//const auto params5dDynamic = ::testing::Combine(
//        ::testing::Combine(
//            ::testing::Combine(
//                ::testing::ValuesIn(inShapesDynamic5d)),
//            ::testing::ValuesIn(netPrecisions),
//            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
//        ::testing::ValuesIn(filterCPUInfoForDevice(5)));
//
//const auto params4dDynamic = ::testing::Combine(
//        ::testing::Combine(
//            ::testing::Combine(
//                ::testing::ValuesIn(inShapesDynamic4d)),
//            ::testing::ValuesIn(netPrecisions),
//            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
//        ::testing::ValuesIn(filterCPUInfoForDevice(4)));
//
//const auto params3dDynamic = ::testing::Combine(
//        ::testing::Combine(
//            ::testing::Combine(
//                ::testing::ValuesIn(inShapesDynamic3d)),
//            ::testing::ValuesIn(netPrecisions),
//            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
//        ::testing::ValuesIn(filterCPUInfoForDevice(3)));
//
//// We don't check static case, because of constant folding
//INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf3dDynamicLayoutTest, ShapeOfLayerCPUTest,
//                         params3dDynamic, ShapeOfLayerCPUTest::getTestCaseName);
//INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf4dDynamicLayoutTest, ShapeOfLayerCPUTest,
//                         params4dDynamic, ShapeOfLayerCPUTest::getTestCaseName);
//INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf5dDynamicLayoutTest, ShapeOfLayerCPUTest,
//                         params5dDynamic, ShapeOfLayerCPUTest::getTestCaseName);
//} // namespace
//} // namespace CPULayerTestsDefinitions
@@ -4,6 +4,7 @@

 #include "cpu_test_utils.hpp"
 #include "utils/rt_info/memory_formats_attribute.hpp"
+#include <cstdint>

 namespace CPUTestUtils {

@@ -257,6 +258,8 @@ CPUTestsBase::makeCPUInfo(std::vector<cpu_memory_format_t> inFmts, std::vector<c
         cpuInfo.insert({"PrimitivesPriority", std::make_shared<ngraph::VariantWrapper<std::string>>(impls2str(priority))});
     }

+    cpuInfo.insert({"enforceBF16evenForGraphTail", ov::make_variant<int64_t>(true)});
+
     return cpuInfo;
 }
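This insert is the write side of the enforceBF16evenForGraphTail attribute that the MKLDNNNode constructor hunk earlier parses back out of rt_info. A minimal sketch of the round trip, assuming the ngraph variant API exactly as it is used in this diff:

#include <ngraph/variant.hpp>
#include <cstdint>
#include <map>
#include <memory>
#include <string>

int main() {
    std::map<std::string, std::shared_ptr<ngraph::Variant>> rtInfo;
    // Write side (CPUTestsBase::makeCPUInfo in this diff):
    rtInfo["enforceBF16evenForGraphTail"] = ov::make_variant<int64_t>(true);

    // Read side (MKLDNNNode constructor in this diff):
    bool enforceBF16evenForGraphTail = false;
    const auto it = rtInfo.find("enforceBF16evenForGraphTail");
    if (it != rtInfo.end())
        if (const auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<int64_t>>(it->second))
            enforceBF16evenForGraphTail = value->get();
    return enforceBF16evenForGraphTail ? 0 : 1;
}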
@@ -93,5 +93,7 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*CanSetOutBlobWithDifferentPrecision/netPRC=BIN.*)",
         // TODO: Issue: 67486
         R"(.*(EltwiseLayerTest|SoftMaxLayerTest).*)",
+        // TODO: Issue: 68712
+        R"(.*.MatMul.*CompareWithRefs.*IS0=\(1.5\)_IS1=\(1.5\).*transpose_a=0.*transpose_b=1.*CONSTANT.*FP16.*UNSPECIFIED.*UNSPECIFIED.*ANY.*)",
     };
 }
@@ -29,21 +29,19 @@ void scatter_elem_update(const DataType* input_data,
     // output[i][indices[i][j][k]][k] = updates[i][j][k] if axis = 1,
     // output[i][j][indices[i][j][k]] = updates[i][j][k] if axis = 2

-    NGRAPH_SUPPRESS_DEPRECATED_START
-    CoordinateTransform indices_transform{indices_shape};
-    CoordinateTransform data_transform{data_shape};
+    CoordinateTransformBasic indices_transform{indices_shape};
+    CoordinateTransformBasic data_transform{data_shape};
+    const auto indices_strides = row_major_strides(indices_shape);
+    const auto data_strides = row_major_strides(data_shape);

     for (const Coordinate& indices_cord : indices_transform) {
-        const size_t indices_idx = indices_transform.index(indices_cord);
+        const size_t indices_idx =
+            std::inner_product(indices_cord.begin(), indices_cord.end(), indices_strides.begin(), 0);
         Coordinate out_cord(indices_cord);
         out_cord.at(axis) = indices[indices_idx];
         NGRAPH_CHECK(data_transform.has_source_coordinate(out_cord),
                      "Provided index coordinates are out of input data bounds: ",
                      out_cord,
                      ".");
-        out_buf[data_transform.index(out_cord)] = updates[indices_idx];
+        const auto out_idx = std::inner_product(out_cord.begin(), out_cord.end(), data_strides.begin(), 0);
+        out_buf[out_idx] = updates[indices_idx];
     }
-    NGRAPH_SUPPRESS_DEPRECATED_END
 }
 } // namespace reference
 } // namespace runtime
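The switch from CoordinateTransform::index() to std::inner_product relies on row-major strides: for indices_shape = {2, 3, 4} the strides are {12, 4, 1}, so coordinate (1, 2, 3) maps to 1*12 + 2*4 + 3*1 = 23. A worked hedged sketch (row_major_strides below is a local stand-in for ngraph's helper of the same name):

#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

// Row-major strides: the stride of each dimension is the product of all
// dimensions to its right (innermost dimension has stride 1).
std::vector<size_t> row_major_strides(const std::vector<size_t>& shape) {
    std::vector<size_t> strides(shape.size(), 1);
    for (size_t i = shape.size(); i-- > 1;)
        strides[i - 1] = strides[i] * shape[i];
    return strides;
}

int main() {
    const std::vector<size_t> shape{2, 3, 4};
    const auto strides = row_major_strides(shape);  // {12, 4, 1}
    const std::vector<size_t> cord{1, 2, 3};
    const auto idx = std::inner_product(cord.begin(), cord.end(), strides.begin(), size_t(0));
    assert(idx == 23);  // 1*12 + 2*4 + 3*1, the flat index the patch computes
    return 0;
}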
@@ -81,7 +81,7 @@ void op::v1::Reshape::validate_and_infer_types() {
     std::tie(lb, ub) = evaluate_both_bounds(get_input_source_output(1));
     if (lb && ub) {
         const auto lower_bound = std::make_shared<op::v0::Constant>(lb)->cast_vector<int64_t>();
-        const auto upper_bound = std::make_shared<op::v0::Constant>(ub)->cast_vector<int64_t>();
+        auto upper_bound = std::make_shared<op::v0::Constant>(ub)->cast_vector<int64_t>();
         shape_can_be_calculated = true;
         NGRAPH_CHECK(lower_bound.size() == upper_bound.size());
         for (size_t i = 0; i < lower_bound.size(); ++i) {
@@ -94,6 +94,13 @@ void op::v1::Reshape::validate_and_infer_types() {
                 NODE_VALIDATION_CHECK(this, minus_one_idx == -1, "More than one dimension has size of -1");
                 minus_one_idx = static_cast<int64_t>(i);
             }
+
+            // We must handle i32 fully dynamic dimension in a special way
+            if (get_input_element_type(1) == element::i32 &&
+                upper_bound[i] == std::numeric_limits<std::int32_t>::max()) {
+                upper_bound[i] = std::numeric_limits<std::int64_t>::max();
+            }
+
             reshape_pattern.emplace_back(lower_bound[i], upper_bound[i]);
         }
         // For the scalar case reshape_pattern should be empty
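The special case above exists because an i32 shape pattern cannot express an unbounded dimension: a fully dynamic dimension evaluated through an i32 ShapeOf comes back with upper bound INT32_MAX, which would otherwise be treated as a real bound. A small hedged sketch of the widening rule (hypothetical helper, not the operator code):

#include <cassert>
#include <cstdint>
#include <limits>

// When the reshape pattern comes from an i32 source, promote an INT32_MAX
// upper bound to INT64_MAX so the dimension is still treated as unbounded.
int64_t widen_dynamic_upper_bound(int64_t upper_bound, bool pattern_is_i32) {
    if (pattern_is_i32 && upper_bound == std::numeric_limits<std::int32_t>::max())
        return std::numeric_limits<std::int64_t>::max();
    return upper_bound;
}

int main() {
    assert(widen_dynamic_upper_bound(std::numeric_limits<std::int32_t>::max(), true) ==
           std::numeric_limits<std::int64_t>::max());
    assert(widen_dynamic_upper_bound(5, true) == 5);  // bounded dims are left alone
    return 0;
}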
@@ -561,3 +561,17 @@ TEST(type_prop, reshape_to_scalar_3) {
         make_shared<op::v1::Reshape>(param, op::Constant::create(element::i64, {}, std::vector<int64_t>{100}), false),
         std::exception);
 }
+
+TEST(type_prop, dynamic_shape_propagation_with_i32_precision) {
+    auto param = make_shared<op::Parameter>(element::f32, PartialShape{1, -1, -1});
+    auto shape_of = std::make_shared<op::v3::ShapeOf>(param, element::i32);
+
+    auto indices = op::Constant::create(element::i32, {3}, {1, 2, 0});
+    auto axis = op::Constant::create(element::i32, {1}, {0});
+    auto gather = std::make_shared<op::v1::Gather>(shape_of, indices, axis);
+
+    auto reshape = std::make_shared<op::v1::Reshape>(param, gather, true);
+
+    ASSERT_EQ(reshape->get_element_type(), element::f32);
+    ASSERT_EQ(reshape->get_output_partial_shape(0), (PartialShape{-1, -1, 1}));
+}