[CPU] Added unit tests for custom shape infer (#17162)

Yuan Hu 2023-08-03 16:41:27 +08:00 committed by GitHub
parent e64f84d88e
commit d445f9ec95
110 changed files with 3678 additions and 989 deletions

View File

@ -44,7 +44,7 @@ public:
Shape(const VectorDims& minDims, const VectorDims& maxDims) {
if (minDims.size() != maxDims.size()) {
IE_THROW() << "Can't create shape due to min/max vectors dims size mismatch";
OPENVINO_THROW("Can't create shape due to min/max vectors dims size mismatch");
}
this->minDims = minDims;
this->maxDims = maxDims;
@ -114,7 +114,7 @@ public:
*/
const VectorDims& getStaticDims() const {
if (type != ShapeType::Static) {
IE_THROW() << "Cannot get dims for non static shape";
OPENVINO_THROW("Cannot get dims for non static shape");
}
return minDims;
@ -155,7 +155,7 @@ public:
size_t getElementsCount() const {
if (type != ShapeType::Static) {
IE_THROW() << "Cannot get elements count for non static shape";
OPENVINO_THROW("Cannot get elements count for non static shape");
}
size_t size = 1;

View File

@ -32,7 +32,7 @@
#include "nodes/node_config.h"
#include "cache/multi_cache.h"
#include <utils/shape_inference/shape_inference_cpu.hpp>
#include <shape_inference/shape_inference_cpu.hpp>
#include "utils/debug_capabilities.h"
#include "utils/bit_util.hpp"

View File

@ -14,6 +14,7 @@
#include <utils/bfloat16.hpp>
#include <utils/general_utils.h>
#include <vector>
#include "shape_inference/custom/adaptive_pooling.hpp"
using namespace InferenceEngine;
using namespace dnnl;
@ -23,57 +24,6 @@ namespace ov {
namespace intel_cpu {
namespace node {
namespace {
/**
* Implements Adaptive Pooling shape inference algorithm. The output tensor shape consists of the input [N, C] dimensions and
* the [D_out, H_out, W_out] dimensions, which are placed in the second input parameter.
*
*/
class AdaptivePoolingShapeInfer : public ShapeInferEmptyPads {
public:
explicit AdaptivePoolingShapeInfer(size_t outputs_count) : m_outputs_count(outputs_count) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
const auto& inputDims = input_shapes[0].get();
const auto& spatialDims = input_shapes[1].get();
const auto inputRank = inputDims.size();
const auto spatialDimsSize = spatialDims[0];
VectorDims outputDims(inputRank);
outputDims[0] = inputDims[0];
outputDims[1] = inputDims[1];
auto newSpatialDimsPtr = reinterpret_cast<int32_t *>(data_dependency.at(1)->getData());
for (size_t i = 0; i < spatialDimsSize; i++) {
outputDims[i + 2] = newSpatialDimsPtr[i];
}
std::vector<VectorDims> result(m_outputs_count, outputDims);
return {std::move(result), ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(1);
}
private:
size_t m_outputs_count;
};
class AdaptivePoolingShapeInferFactory : public ShapeInferFactory {
public:
AdaptivePoolingShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
size_t outputs_count = m_op->get_output_size();
return std::make_shared<AdaptivePoolingShapeInfer>(outputs_count);
}
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace
bool AdaptivePooling::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (one_of(op->get_type_info(), ngraph::op::v8::AdaptiveAvgPool::get_type_info_static())) {

View File

@ -7,7 +7,7 @@
#include <algorithm>
#include <ngraph/opsets/opset3.hpp>
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
#include "ie_parallel.hpp"
#include "bucketize.h"

View File

@ -11,6 +11,7 @@
#include <openvino/core/type.hpp>
#include <ie/ie_parallel.hpp>
#include "kernels/x64/jit_kernel.hpp"
#include "shape_inference/custom/color_convert.hpp"
using namespace InferenceEngine;
using namespace dnnl::impl;
@ -971,45 +972,6 @@ public:
#endif
} // namespace i420
/**
 * Implements Color Convert shape inference algorithm. If the node has only a single plane, the H dimension is
 * recalculated as 2/3 of the initial size; otherwise it is passed through unchanged.
*
*/
class ColorConvertShapeInfer : public ShapeInferEmptyPads {
public:
ColorConvertShapeInfer(bool singlePlain) : m_singlePlain(singlePlain) {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
const auto& dims = input_shapes.front().get();
if (dims.size() != 4)
IE_THROW() <<"NV12Converter node has incorrect input dimensions";
return { m_singlePlain
? std::vector<VectorDims>{ { dims[Converter::N_DIM], dims[Converter::H_DIM] * 2 / 3, dims[Converter::W_DIM], 3 } }
: std::vector<VectorDims>{ { dims[Converter::N_DIM], dims[Converter::H_DIM], dims[Converter::W_DIM], 3 } },
ShapeInferStatus::success };
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
bool m_singlePlain = false;
};
class ColorConvertShapeInferFactory : public ShapeInferFactory {
public:
ColorConvertShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
bool isSinglePlain = m_op->get_input_size() == 1;
return std::make_shared<ColorConvertShapeInfer>(isSinglePlain);
}
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace
ColorConvert::Converter::Converter(Node *node, const ColorFormat & colorFormat)

View File

@ -8,7 +8,7 @@
#include <ngraph/opsets/opset1.hpp>
#include <ie_ngraph_utils.hpp>
#include <utils/ngraph_utils.hpp>
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
using namespace dnnl;
using namespace InferenceEngine;

View File

@ -21,7 +21,7 @@
#include <common/primitive_hashing_utils.hpp>
#include <common/primitive_desc.hpp>
#include <common/primitive_desc_iface.hpp>
#include <utils/shape_inference/shape_inference_ngraph.hpp>
#include <shape_inference/shape_inference_ngraph.hpp>
#if defined(OV_CPU_WITH_ACL)
#include "executors/acl/acl_utils.hpp"

View File

@ -46,6 +46,7 @@
#include <map>
#include <functional>
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include "shape_inference/custom/eltwise.hpp"
using namespace InferenceEngine;
using namespace dnnl::impl::utils;
@ -927,64 +928,6 @@ private:
#endif // OPENVINO_ARCH_X86_64
namespace {
/**
* Implements Eltwise shape inference algorithm. The algorithm is based on broadcasting all the input shapes
* according to the NUMPY broadcast rule. This implementation is more lightweight than the ngraph one.
*
*/
class EltwiseShapeInfer : public ShapeInferEmptyPads {
public:
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
size_t max_rank = 0;
size_t max_rank_idx = 0;
for (size_t i = 0; i < input_shapes.size(); ++i) {
auto item_rank = input_shapes[i].get().size();
if (item_rank > max_rank) {
max_rank = item_rank;
max_rank_idx = i;
}
}
auto output_shape = input_shapes[max_rank_idx].get();
// use NUMPY broadcast rule
for (size_t i = 0; i < input_shapes.size(); i++) {
if (i == max_rank_idx)
continue;
auto& input_shape = input_shapes[i].get();
if (input_shape.size() > output_shape.size()) {
IE_THROW() << "Eltwise shape infer input and output shapes rank mismatch";
}
size_t offset = output_shape.size() - input_shape.size();
for (size_t j = 0; j < input_shape.size(); ++j) {
if (input_shape[j] != output_shape[offset + j]) {
if (output_shape[offset + j] == 1) {
output_shape[offset + j] = input_shape[j];
} else {
if (input_shape[j] != 1) IE_THROW() << "Eltwise shape infer input shapes dim index: " << j << " mismatch";
}
}
}
}
return { { std::move(output_shape) }, ShapeInferStatus::success };
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
};
class EltwiseShapeInferFactory : public ShapeInferFactory {
public:
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<EltwiseShapeInfer>();
}
};
} // namespace
Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shared_ptr<ngraph::Node>& op) {
const auto const1 = ov::as_type_ptr<ngraph::opset1::Constant>(op->get_input_node_shared_ptr(0));
const auto const2 = ov::as_type_ptr<ngraph::opset1::Constant>(op->get_input_node_shared_ptr(1));

View File

@ -6,7 +6,7 @@
#include <ie_ngraph_utils.hpp>
#include <utils/bfloat16.hpp>
#include <ie_parallel.hpp>
#include <utils/shape_inference/shape_inference_ngraph.hpp>
#include <shape_inference/shape_inference_ngraph.hpp>
#define THROW_ERROR IE_THROW() << NameFromType(getType()) << " node with name '" << getName() << "' "

View File

@ -23,7 +23,7 @@
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include "common/cpu_memcpy.h"
#include <common/primitive_hashing_utils.hpp>
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
#include <ngraph/opsets/opset1.hpp>
#include "utils/ngraph_utils.hpp"

View File

@ -26,6 +26,7 @@
#include "common/primitive_desc.hpp"
#include "common/primitive_desc_iface.hpp"
#include "common/cpu_convert.h"
#include "shape_inference/custom/fullyconnected.hpp"
#include <string>
#include <vector>
@ -92,53 +93,6 @@ bool FCKey::operator==(const FCKey &rhs) const {
return retVal;
}
class FCShapeInfer : public ShapeInferEmptyPads {
public:
FCShapeInfer(size_t outPut_rank) : out_rank(outPut_rank) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
const VectorDims& activationShape = input_shapes[0].get();
const VectorDims& weightShape = input_shapes[1].get();
size_t activationRank = activationShape.size();
size_t channelRank = weightShape.size() - 1;
// activation weight output_shape
// NCHW CoCHW NCo
// TNC CoC TNCo
// NC CoC NCo
VectorDims outputShape(out_rank, 1);
// set Co
outputShape.back() = weightShape[0];
// set batch dims
size_t batchRank = activationRank - channelRank;
size_t startIdx = out_rank - batchRank - 1;
for (size_t i = 0; i < batchRank; i++) {
outputShape[i + startIdx] = activationShape[i];
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
size_t out_rank = 0;
};
class FCShapeInferFactory : public ShapeInferFactory {
public:
FCShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<FCShapeInfer>(m_op->get_output_partial_shape(0).rank().get_length());
}
private:
std::shared_ptr<const ngraph::Node> m_op;
};
} // namespace
bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {

View File

@ -11,8 +11,8 @@
#include "common/cpu_memcpy.h"
#include <utils/general_utils.h>
#include "kernels/x64/gather_uni_kernel.hpp"
#include "utils/shape_inference/shape_inference_cpu.hpp"
#include <partitioned_mem_mgr.h>
#include "shape_inference/custom/gather.hpp"
using namespace InferenceEngine;
using namespace dnnl::impl::cpu;
@ -43,76 +43,6 @@ bool Gather::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std
return true;
}
namespace {
class GatherShapeInfer : public ShapeInferEmptyPads {
public:
GatherShapeInfer(bool isAxisInputConst, bool isIndicesScalar, int axis, int batchDims) : m_isAxisInputConst(isAxisInputConst),
m_isIndicesScalar(isIndicesScalar), m_axis(axis), m_batchDims(batchDims) {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
static constexpr size_t GATHER_DATA = 0, GATHER_INDICES = 1, GATHER_AXIS = 2;
const auto& input_shape = input_shapes[GATHER_DATA].get();
// Use VectorDims{} instead of {1} for Scalar
const auto& indices_shape = m_isIndicesScalar ? VectorDims{} : input_shapes[GATHER_INDICES].get();
if (!m_isAxisInputConst) {
if (data_dependency.at(GATHER_AXIS)->getDesc().getPrecision() != Precision::I32) {
IE_THROW() << "Unsupported precision " << data_dependency.at(GATHER_AXIS)->getDesc().getPrecision()
<< " for axis tensor.";
}
m_axis = reinterpret_cast<const int32_t *>(data_dependency.at(GATHER_AXIS)->getData())[0];
}
if (m_axis < 0)
m_axis += input_shape.size();
if (m_batchDims < 0)
m_batchDims += indices_shape.size();
VectorDims output_shape;
output_shape.reserve(input_shape.size() + indices_shape.size() - m_batchDims - 1);
output_shape.insert(output_shape.end(), input_shape.begin(), input_shape.begin() + m_axis);
output_shape.insert(output_shape.end(), indices_shape.begin() + m_batchDims, indices_shape.end());
output_shape.insert(output_shape.end(), input_shape.begin() + m_axis + 1, input_shape.end());
return {{std::move(output_shape)}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(2);
}
private:
bool m_isAxisInputConst = false;
bool m_isIndicesScalar = false;
int m_axis = 0;
int m_batchDims = 0;
};
class GatherShapeInferFactory : public ShapeInferFactory {
public:
GatherShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
static constexpr size_t GATHER_INDICES = 1, GATHER_AXIS = 2;
bool isAxisInputConst = ov::is_type<ov::op::v0::Constant>(m_op->get_input_node_ptr(GATHER_AXIS));
const auto& indicesShape = m_op->get_input_partial_shape(GATHER_INDICES);
if (!indicesShape.rank().is_static())
IE_THROW() << "indicesShape do not support dynamic rank.";
bool isIndicesScalar = indicesShape.rank().get_length() == 0;
int axis = isAxisInputConst ? ov::as_type<ov::op::v0::Constant>(m_op->get_input_node_ptr(GATHER_AXIS))->cast_vector<int>()[0] : 0;
int batchDims = ov::is_type<ov::op::v8::Gather>(m_op) ? static_cast<int>(ov::as_type_ptr<ov::op::v8::Gather>(m_op)->get_batch_dims()) : (
ov::is_type<ov::op::v7::Gather>(m_op) ? static_cast<int>(ov::as_type_ptr<ov::op::v7::Gather>(m_op)->get_batch_dims()) : 0);
return std::make_shared<GatherShapeInfer>(isAxisInputConst, isIndicesScalar, axis, batchDims);
}
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace
Gather::Gather(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, GatherShapeInferFactory(op)),
batchDims(0) {

View File

@ -18,7 +18,7 @@
#include "ie_parallel.hpp"
#include "common/cpu_memcpy.h"
#include "generate_proposals.h"
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
namespace ov {
namespace intel_cpu {

View File

@ -8,7 +8,7 @@
#include "ie_ngraph_utils.hpp"
#include "transformations/utils/utils.hpp"
#include "common/cpu_memcpy.h"
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
#include <string>
#include <vector>

View File

@ -21,7 +21,7 @@
#include "utils/cpu_utils.hpp"
#include <cpu/x64/jit_generator.hpp>
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include "utils/shape_inference/shape_inference_pass_through.hpp"
#include "shape_inference/shape_inference_pass_through.hpp"
using namespace dnnl;
using namespace InferenceEngine;

View File

@ -26,11 +26,11 @@
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/opsets/opset4.hpp>
#include <ngraph/opsets/opset11.hpp>
#include <utils/shape_inference/static_shape.hpp>
#include <utils/shape_inference/shape_inference.hpp>
#include <shape_inference/static_shape.hpp>
#include <shape_inference/shape_inference.hpp>
#include <ie_ngraph_utils.hpp>
#include "utils/cpu_utils.hpp"
#include <utils/shape_inference/shape_inference_ngraph.hpp>
#include <shape_inference/shape_inference_ngraph.hpp>
using namespace dnnl;
using namespace InferenceEngine;

View File

@ -9,7 +9,7 @@
#include <memory_desc/cpu_memory_desc_utils.h>
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include <common/primitive_hashing_utils.hpp>
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
#include <memory>
#include <string>

View File

@ -10,7 +10,7 @@
#include "ie_parallel.hpp"
#include "mathematics.h"
#include "utils/general_utils.h"
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
using namespace InferenceEngine;

View File

@ -22,7 +22,7 @@
#include <dnnl_extension_utils.h>
#include <common/primitive_hashing_utils.hpp>
#include <cpu/x64/cpu_isa_traits.hpp>
#include "shape_inference/custom/matmul.hpp"
using namespace dnnl;
using namespace InferenceEngine;
@ -114,80 +114,6 @@ bool MatMul::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
return true;
}
namespace {
class MMShapeInfer : public ShapeInferEmptyPads {
public:
MMShapeInfer(const size_t& out_rank, const bool& transpose_a, const bool& transpose_b) :
m_out_rank(out_rank), m_transpose_a(transpose_a), m_transpose_b(transpose_b) {
m_shapeY = VectorDims(m_out_rank, 1); // for output and cache
}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
const VectorDims& shapeA = input_shapes[0].get();
const VectorDims& shapeB = input_shapes[1].get();
const size_t rankA = shapeA.size();
const size_t rankB = shapeB.size();
// getSupportedDescriptors has already done some shape checks, so here:
// 1. No need to assert the scalar type since matmul_shape_inference has already checked it.
// 2. No need to check the compatibility of the last two dims.
// 3. The 1-D x 1-D case still has to be handled.
// 4. Transpose handling is still required.
// 5. Only inputs of the same rank are supported.
// 6. The broadcast check is simplified.
if (rankA == 1 && rankB == 1 && shapeA[0] == shapeB[0]) {
return {{m_shapeY}, ShapeInferStatus::success};
}
m_shapeY[m_out_rank-2] = m_transpose_a ? shapeA[rankA-1] : shapeA[rankA-2];
m_shapeY[m_out_rank-1] = m_transpose_b ? shapeB[rankB-2] : shapeB[rankB-1];
for (size_t i=0; i < m_out_rank-2; ++i) {
if (shapeA[i] != shapeB[i]) {
if (shapeB[i] == 1) {
m_shapeY[i] = shapeA[i];
continue;
} else if (shapeA[i] != 1) {
IE_THROW() << "Incompatible MatMul batch dimension. Cant merge the first input dimension=" <<
shapeA[i] << " with second input dimension=" << shapeB[i] << " at index=" << i;
}
}
m_shapeY[i] = shapeB[i];
}
return {{m_shapeY}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
VectorDims m_shapeY;
const size_t m_out_rank;
const bool m_transpose_a;
const bool m_transpose_b;
};
class MMShapeInferFactory : public ShapeInferFactory {
public:
MMShapeInferFactory(const std::shared_ptr<ngraph::Node>& op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
if (const auto matmul = ov::as_type_ptr<const ngraph::opset1::MatMul>(m_op)) {
const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length();
const bool transpose_a = matmul->get_transpose_a();
const bool transpose_b = matmul->get_transpose_b();
return std::make_shared<MMShapeInfer>(output_rank, transpose_a, transpose_b);
} else {
IE_THROW() << "Unexpected operation type in the MatMul shape inference factory";
}
}
private:
std::shared_ptr<ngraph::Node> m_op;
};
} // namespace
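For a quick sanity check of the MMShapeInfer rule above, here is a minimal standalone sketch (a hypothetical helper on plain std::vector<size_t> rather than the plugin's VectorDims; same-rank inputs of rank >= 2 are assumed, and the 1-D x 1-D special case is omitted):
#include <stdexcept>
#include <vector>

std::vector<size_t> matmul_out_shape(const std::vector<size_t>& a, const std::vector<size_t>& b,
                                     bool transpose_a, bool transpose_b) {
    const size_t rank = a.size();  // assumes a.size() == b.size() >= 2
    std::vector<size_t> out(rank, 1);
    // the two innermost dims come from A and B, honouring the transpose flags
    out[rank - 2] = transpose_a ? a[rank - 1] : a[rank - 2];
    out[rank - 1] = transpose_b ? b[rank - 2] : b[rank - 1];
    // batch dims: equal dims pass through, a dim of 1 broadcasts against the other input
    for (size_t i = 0; i + 2 < rank; ++i) {
        if (a[i] == b[i] || b[i] == 1)
            out[i] = a[i];
        else if (a[i] == 1)
            out[i] = b[i];
        else
            throw std::runtime_error("incompatible MatMul batch dims");
    }
    return out;  // e.g. a = {2, 1, 3, 4}, b = {1, 5, 4, 6} -> {2, 5, 3, 6}
}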
MatMul::MatMul(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
Node(op, context, MMShapeInferFactory(op)), withBiases(false) {
std::string errorMessage;

View File

@ -14,7 +14,7 @@
#include "ie_parallel.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "utils/general_utils.h"
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
using namespace InferenceEngine;

View File

@ -17,7 +17,7 @@
#include "ie_parallel.hpp"
#include "utils/general_utils.h"
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
using namespace InferenceEngine;

View File

@ -9,43 +9,11 @@
#include "ie_parallel.hpp"
#include "common/cpu_memcpy.h"
#include "transformations/cpu_opset/common/op/ngram.hpp"
#include "shape_inference/custom/ngram.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
namespace {
class NgramShapeInfer : public ShapeInferEmptyPads {
public:
NgramShapeInfer(const size_t k) : m_k(k) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
auto output_shape = input_shapes[0].get();
output_shape[1] *= m_k;
return {{std::move(output_shape)}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
size_t m_k;
};
class NgramShapeInferFactory : public ShapeInferFactory {
public:
NgramShapeInferFactory(const std::shared_ptr<ov::Node>& op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
auto ngram = ov::as_type_ptr<NgramNode>(m_op);
if (!ngram) {
IE_THROW(Unexpected) << "Wrong operation type";
}
return std::make_shared<NgramShapeInfer>(ngram->get_k());
}
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace
bool Ngram::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {

View File

@ -18,7 +18,7 @@
#include "cpu/x64/jit_generator.hpp"
#include "emitters/x64/jit_load_store_emitters.hpp"
#include <cpu/x64/injectors/jit_uni_eltwise_injector.hpp>
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
using namespace InferenceEngine;
using namespace dnnl;

View File

@ -9,7 +9,7 @@
#include <ie_parallel.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <utils/bfloat16.hpp>
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
using namespace InferenceEngine;

View File

@ -23,7 +23,7 @@
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include "utils/cpu_utils.hpp"
#include <common/primitive_hashing_utils.hpp>
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
using namespace dnnl;
using namespace InferenceEngine;

View File

@ -11,9 +11,8 @@
#include <nodes/common/blocked_desc_creator.h>
#include <ngraph/opsets/opset1.hpp>
#include <ie_ngraph_utils.hpp>
#include <utils/shape_inference/static_shape.hpp>
#include <utils/shape_inference/shape_inference.hpp>
#include "common/cpu_memcpy.h"
#include "shape_inference/custom/one_hot.hpp"
using namespace InferenceEngine;
@ -21,58 +20,6 @@ namespace ov {
namespace intel_cpu {
namespace node {
namespace {
/**
* Implements One Hot shape inference algorithm. The output shape is the input `indices` tensor shape, where a new axis
* of size `depth` is inserted at the dimension defined by the `axis` parameter.
*
*/
class OneHotShapeInfer : public ShapeInferEmptyPads {
public:
explicit OneHotShapeInfer(int64_t axis) : m_axis(axis) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
auto depth = reinterpret_cast<int32_t *>(data_dependency.at(1)->getData())[0];
auto result = input_shapes.front().get();
result.insert(result.begin() + m_axis, depth);
return {{std::move(result)}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(1);
}
private:
int64_t m_axis = 0;
};
class OneHotShapeInferFactory : public ShapeInferFactory {
public:
OneHotShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
auto oneHot = ov::as_type_ptr<const ngraph::opset1::OneHot>(m_op);
if (!oneHot) {
IE_THROW() << "Unexpected op type in OneHot shape inference factory: " << m_op->get_type_name();
}
auto axis = oneHot->get_axis();
auto dstShape = oneHot->get_output_partial_shape(0);
int output_dims_size = dstShape.size();
if (0 == output_dims_size) output_dims_size = 1;
if (axis < 0) {
axis += output_dims_size;
}
return std::make_shared<OneHotShapeInfer>(axis);
}
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace
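For reference, a minimal standalone sketch of the OneHot output-shape rule described in the comment above (a hypothetical helper on plain std::vector<size_t>, not the plugin API; `depth` stands for the runtime value read from the second input):
#include <cstdint>
#include <vector>

std::vector<size_t> one_hot_out_shape(std::vector<size_t> indices_shape, int64_t axis, int32_t depth) {
    // a negative axis is normalized against the output rank (indices rank + 1)
    if (axis < 0)
        axis += static_cast<int64_t>(indices_shape.size()) + 1;
    // the new `depth`-sized axis is inserted into the indices shape
    indices_shape.insert(indices_shape.begin() + axis, static_cast<size_t>(depth));
    return indices_shape;  // e.g. indices {3, 4}, axis = -1, depth = 10 -> {3, 4, 10}
}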
bool OneHot::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
const auto oneHot = std::dynamic_pointer_cast<const ngraph::opset1::OneHot>(op);

View File

@ -13,6 +13,7 @@
#include <dnnl_types.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>
#include "shape_inference/custom/priorbox.hpp"
using namespace InferenceEngine;
@ -22,49 +23,6 @@ namespace ov {
namespace intel_cpu {
namespace node {
namespace {
class PriorBoxShapeInfer : public ShapeInferEmptyPads {
/**
 * Implements Prior Box shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
 * `number_of_priors` is an attribute of the operation. Height and width are provided in the first input parameter.
*
*/
public:
explicit PriorBoxShapeInfer(int64_t number_of_priors) : m_number_of_priors(number_of_priors) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
const int* in_data = reinterpret_cast<const int*>(data_dependency.at(0)->getData());
const int H = in_data[0];
const int W = in_data[1];
const auto output = static_cast<size_t>(4 * H * W * m_number_of_priors);
return {{{2, output}}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(0);
}
private:
int64_t m_number_of_priors = 0;
};
class PriorBoxShapeInferFactory : public ShapeInferFactory {
public:
explicit PriorBoxShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
auto priorBox = ov::as_type_ptr<const ngraph::opset1::PriorBox>(m_op);
if (!priorBox) {
IE_THROW() << "Unexpected op type in PriorBox shape inference factory: " << m_op->get_type_name();
}
const auto& attrs = priorBox->get_attrs();
auto number_of_priors = ngraph::opset1::PriorBox::number_of_priors(attrs);
return std::make_shared<PriorBoxShapeInfer>(number_of_priors);
}
private:
std::shared_ptr<ov::Node> m_op;
};
float clip_great(float x, float threshold) {
return x < threshold ? x : threshold;
}

View File

@ -13,59 +13,13 @@
#include <dnnl_types.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>
#include "shape_inference/custom/priorbox_clustered.hpp"
using namespace InferenceEngine;
namespace ov {
namespace intel_cpu {
namespace node {
namespace {
/**
* Implements Prior Box Clustered shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
 * `number_of_priors` is an attribute of the operation. Height and width are provided in the first input parameter.
*
*/
class PriorBoxClusteredShapeInfer : public ShapeInferEmptyPads {
public:
explicit PriorBoxClusteredShapeInfer(size_t number_of_priors) : m_number_of_priors(number_of_priors) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
const int* in_data = reinterpret_cast<const int*>(data_dependency.at(0)->getData());
const int H = in_data[0];
const int W = in_data[1];
const auto output = static_cast<size_t>(4 * H * W * m_number_of_priors);
return {{{2, output}}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(0);
}
private:
size_t m_number_of_priors = 0;
};
class PriorBoxClusteredShapeInferFactory : public ShapeInferFactory {
public:
explicit PriorBoxClusteredShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
auto priorBox = ov::as_type_ptr<const ngraph::opset1::PriorBoxClustered>(m_op);
if (!priorBox) {
IE_THROW() << "Unexpected op type in PriorBoxClustered shape inference factory: " << m_op->get_type_name();
}
const auto& attrs = priorBox->get_attrs();
auto number_of_priors = attrs.widths.size();
return std::make_shared<PriorBoxClusteredShapeInfer>(number_of_priors);
}
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace
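PriorBox and PriorBoxClustered produce the same output shape; for reference, a minimal standalone sketch of that rule (a hypothetical helper, where H and W stand for the runtime values read from the first input):
#include <vector>

std::vector<size_t> prior_box_out_shape(int height, int width, int number_of_priors) {
    // output layout is always [2, 4 * H * W * number_of_priors]
    return {2, static_cast<size_t>(4 * height * width * number_of_priors)};
    // e.g. H = 24, W = 42, number_of_priors = 4 -> {2, 16128}
}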
bool PriorBoxClustered::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
const auto priorBox = std::dynamic_pointer_cast<const ngraph::opset1::PriorBoxClustered>(op);

View File

@ -7,7 +7,7 @@
#include "ie_parallel.hpp"
#include "range.h"
#include <utils/general_utils.h>
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
using namespace InferenceEngine;

View File

@ -16,7 +16,7 @@
#include "nodes/common/reorder_prim.h"
#include "convert.h"
#include <common/primitive_hashing_utils.hpp>
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
using namespace dnnl;
using namespace InferenceEngine;

View File

@ -9,9 +9,7 @@
#include <dnnl_extension_utils.h>
#include <openvino/opsets/opset1.hpp>
#include <ie_ngraph_utils.hpp>
#include <utils/shape_inference/static_shape.hpp>
#include <utils/shape_inference/shape_inference.hpp>
#include "utils/shape_inference/shape_inference_cpu.hpp"
#include "shape_inference/custom/reshape.hpp"
#include "common/cpu_memcpy.h"
@ -36,196 +34,6 @@ bool Reshape::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
return true;
}
namespace {
class ReshapeShapeInfer : public ShapeInferEmptyPads {
public:
ReshapeShapeInfer(bool specialZero) : m_specialZero(specialZero) {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
static constexpr size_t RESHAPE_SRC = 0, RESHAPE_PATTERN = 1;
const auto& inputShape = input_shapes[RESHAPE_SRC].get();
const size_t inputShapeSize = inputShape.size();
const auto memPtr = data_dependency.at(RESHAPE_PATTERN);
const auto data = memPtr->getData();
const auto& dims = memPtr->getStaticDims();
const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<Dim>());
std::vector<int64_t> outPattern = ov::get_raw_data_as<int64_t>(
InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()),
data,
outputPatternSize,
ov::util::Cast<int64_t>());
VectorDims outputShape(outputPatternSize);
size_t outputProduct = 1;
int32_t minusOneIdx = -1;
int32_t minusOneCount = 0;
for (int32_t i = 0; i < outputPatternSize; ++i) {
if (outPattern[i] == 0 && m_specialZero && i < static_cast<int32_t>(inputShapeSize)) {
outputShape[i] = inputShape[i];
} else if (outPattern[i] == -1) {
minusOneIdx = i;
minusOneCount++;
} else {
outputShape[i] = outPattern[i];
outputProduct *= outputShape[i];
}
}
size_t inputProduct = 1;
for (size_t i = 0; i < inputShapeSize; ++i) {
if (static_cast<int>(i) < outputPatternSize && outPattern[i] == 0 && m_specialZero)
continue;
inputProduct *= inputShape[i];
}
if (minusOneIdx >= 0) {
if (outputProduct != 0) {
outputShape[minusOneIdx] = inputProduct / outputProduct;
outputProduct *= outputShape[minusOneIdx];
} else {
outputShape[minusOneIdx] = 0;
}
}
if (minusOneCount > 1 || inputProduct != outputProduct) {
IE_THROW(Unexpected) << "[cpu]reshape: the shape of input data conflicts with the reshape pattern";
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(1);
}
private:
bool m_specialZero;
};
class SqueezeShapeInfer : public ShapeInferEmptyPads {
public:
SqueezeShapeInfer() {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
static constexpr size_t SQUEEZE_SRC = 0, SQUEEZE_PATTERN = 1;
const auto& inputShape = input_shapes[SQUEEZE_SRC].get();
const size_t inputShapeSize = inputShape.size();
auto itr = data_dependency.find(SQUEEZE_PATTERN);
VectorDims outputShape;
outputShape.reserve(inputShapeSize);
if (itr != data_dependency.end()) {
const auto memPtr = data_dependency.at(SQUEEZE_PATTERN);
const auto data = memPtr->getData();
const auto& dims = memPtr->getStaticDims();
const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<Dim>());
std::vector<int64_t> outPattern = ov::get_raw_data_as<int64_t>(
InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()),
data,
outputPatternSize,
ov::util::Cast<int64_t>());
std::vector<bool> removeMask(inputShapeSize, false);
bool existError = false;
for (int i = 0; i < outputPatternSize; i++) {
if (outPattern[i] < 0) {
outPattern[i] = inputShapeSize + outPattern[i];
}
if (outPattern[i] >= 0 && outPattern[i] < static_cast<int64_t>(inputShapeSize)) {
removeMask[outPattern[i]] = true;
} else {
existError = true;
break;
}
}
for (size_t i = 0; i < inputShapeSize; i++) {
if (!removeMask[i]) {
outputShape.push_back(inputShape[i]);
} else if (inputShape[i] != 1) {
existError = true;
break;
}
}
if (existError) {
IE_THROW(Unexpected) << "[cpu]squeeze: the shape of input data conflict with the squeeze pattern";
}
} else {
for (size_t i = 0; i < inputShapeSize; i++) {
if (inputShape[i] != 1) {
outputShape.push_back(inputShape[i]);
}
}
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(1);
}
};
class UnsqueezeShapeInfer : public ShapeInferEmptyPads {
public:
UnsqueezeShapeInfer() {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
static constexpr size_t UNSQUEEZE_SRC = 0, UNSQUEEZE_PATTERN = 1;
const auto& inputShape = input_shapes[UNSQUEEZE_SRC].get();
const size_t inputShapeSize = inputShape.size();
const auto memPtr = data_dependency.at(UNSQUEEZE_PATTERN);
const auto data = memPtr->getData();
const auto& dims = memPtr->getStaticDims();
const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<Dim>());
std::vector<int64_t> outPattern = ov::get_raw_data_as<int64_t>(
InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()),
data,
outputPatternSize,
ov::util::Cast<int64_t>());
size_t outputShapeSize = inputShapeSize + outputPatternSize;
VectorDims outputShape(outputShapeSize, 0);
bool existError = false;
for (int i = 0; i < outputPatternSize; i++) {
if (outPattern[i] < 0) {
outPattern[i] = outputShapeSize + outPattern[i];
}
if (outPattern[i] >= 0 && outPattern[i] < static_cast<int64_t>(outputShapeSize)) {
outputShape[outPattern[i]] = 1;
} else {
existError = true;
break;
}
}
for (size_t i = 0, y = 0; i < outputShapeSize; i++) {
if (outputShape[i] == 0) {
if (y < inputShapeSize) {
outputShape[i] = inputShape[y];
y++;
} else {
existError = true;
break;
}
}
}
if (existError) {
IE_THROW(Unexpected) << "[cpu]unsqueeze: the shape of input data conflicts with the unsqueeze pattern";
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return PortMask(1);
}
};
class ReshapeShapeInferFactory : public ShapeInferFactory {
public:
ReshapeShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
if (const auto reshapeOp = ov::as_type_ptr<const ov::op::v1::Reshape>(m_op)) {
return std::make_shared<ReshapeShapeInfer>(reshapeOp->get_special_zero());
} else if (ov::is_type<ov::op::v0::Squeeze>(m_op)) {
return std::make_shared<SqueezeShapeInfer>();
} else if (ov::is_type<ov::op::v0::Unsqueeze>(m_op)) {
return std::make_shared<UnsqueezeShapeInfer>();
} else {
IE_THROW(Unexpected) << "[cpu]reshape: " << m_op->get_type_name() << "is not implemented";
}
}
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace
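A minimal standalone sketch of the ReshapeShapeInfer rule above (a hypothetical helper on plain vectors; the `0`/`-1` handling mirrors the code, degenerate and error cases are omitted):
#include <cstdint>
#include <vector>

std::vector<size_t> reshape_out_shape(const std::vector<size_t>& in,
                                      const std::vector<int64_t>& pattern, bool special_zero) {
    std::vector<size_t> out(pattern.size(), 0);
    size_t out_product = 1;
    int64_t minus_one_idx = -1;
    for (size_t i = 0; i < pattern.size(); ++i) {
        if (pattern[i] == 0 && special_zero && i < in.size()) {
            out[i] = in[i];                              // 0 copies the corresponding input dim
        } else if (pattern[i] == -1) {
            minus_one_idx = static_cast<int64_t>(i);     // -1 is resolved below
            continue;
        } else {
            out[i] = static_cast<size_t>(pattern[i]);
        }
        out_product *= out[i];
    }
    size_t in_product = 1;
    for (size_t d : in) in_product *= d;
    if (minus_one_idx >= 0)
        out[minus_one_idx] = in_product / out_product;   // -1 absorbs the remaining elements
    return out;  // e.g. in {2, 3, 4}, pattern {0, -1}, special_zero = true -> {2, 12}
}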
Reshape::Reshape(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
Node(op, context, ReshapeShapeInferFactory(op)) {
std::string errorMessage;

View File

@ -13,7 +13,7 @@
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include <common/primitive_hashing_utils.hpp>
#include <memory>
#include <utils/shape_inference/shape_inference_ngraph.hpp>
#include <shape_inference/shape_inference_ngraph.hpp>
#include "transformations/utils/utils.hpp"
#include "ov_ops/augru_cell.hpp"

View File

@ -4,7 +4,7 @@
#include "shapeof.h"
#include <ngraph/opsets/opset1.hpp>
#include <utils/shape_inference/shape_inference_cpu.hpp>
#include "shape_inference/custom/shapeof.hpp"
using namespace InferenceEngine;
@ -12,35 +12,6 @@ namespace ov {
namespace intel_cpu {
namespace node {
namespace {
/**
 * Implements Shape Of shape inference algorithm. The output shape is simply a 1D tensor whose single dimension is equal
 * to the input tensor rank.
*
*/
class ShapeOfShapeInfer : public ShapeInferEmptyPads {
public:
ShapeOfShapeInfer() = default;
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
IE_ASSERT(!input_shapes.empty());
return {{VectorDims{input_shapes.front().get().size()}}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
};
class ShapeOfShapeInferFactory : public ShapeInferFactory {
public:
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<ShapeOfShapeInfer>();
}
};
} // namespace
bool ShapeOf::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (!one_of(op->get_type_info(),

View File

@ -11,7 +11,7 @@
#include <ngraph/opsets/opset1.hpp>
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include <common/primitive_hashing_utils.hpp>
#include <utils/shape_inference/shape_inference_pass_through.hpp>
#include <shape_inference/shape_inference_pass_through.hpp>
using namespace dnnl;
using namespace InferenceEngine;

View File

@ -8,8 +8,9 @@
#include "common/cpu_memcpy.h"
#include "input.h"
#include <ngraph/opsets/opset1.hpp>
#include <utils/shape_inference/shape_inference_ngraph.hpp>
#include <shape_inference/shape_inference_ngraph.hpp>
#include "slice_shape_inference_utils.hpp"
#include "shape_inference/custom/strided_slice.hpp"
#include <string>
@ -34,111 +35,6 @@ bool StridedSlice::isSupportedOperation(const std::shared_ptr<const ov::Node>& o
return true;
}
namespace {
constexpr IShapeInfer::port_mask_t port_mask = PortMask(/*BEGIN_ID*/1, /*END_ID*/2, /*STRIDE_ID*/3, /*AXES_ID*/4);
class StridedSliceShapeInfer : public ShapeInferEmptyPads {
public:
StridedSliceShapeInfer(size_t output_size,
std::unordered_set<int64_t> begin_mask,
std::unordered_set<int64_t> end_mask,
std::unordered_set<int64_t> new_axis_mask,
std::unordered_set<int64_t> shrink_axis_mask)
: m_outputShape(output_size, 1),
m_begin_mask_set(std::move(begin_mask)),
m_end_mask_set(std::move(end_mask)),
m_new_axis_mask_set(std::move(new_axis_mask)),
m_shrink_axis_mask_set(std::move(shrink_axis_mask)) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
// align with intel_cpu::node::StridedSlice
static constexpr size_t DATA_ID = 0, BEGIN_ID = 1, END_ID = 2, STRIDE_ID = 3;
const VectorDims& shapeIn = input_shapes[DATA_ID].get();
const VectorDims& shapeBegin = input_shapes[BEGIN_ID].get();
if (data_dependency.at(BEGIN_ID)->getDesc().getPrecision() != Precision::I32 ||
data_dependency.at(END_ID)->getDesc().getPrecision() != Precision::I32 ||
data_dependency.at(STRIDE_ID)->getDesc().getPrecision() != Precision::I32) {
IE_THROW(Unexpected) << "The data type of begin/end/stride is NOT I32, which is unexpected!";
}
auto beginPtr = reinterpret_cast<int32_t *>(data_dependency.at(BEGIN_ID)->getData());
auto endPtr = reinterpret_cast<int32_t *>(data_dependency.at(END_ID)->getData());
auto stridePtr = reinterpret_cast<int32_t *>(data_dependency.at(STRIDE_ID)->getData());
for (size_t i = 0, new_idx = 0; i < shapeIn.size(); ++i) {
if (m_new_axis_mask_set.count(i)) {
// deal with new_axis_mask
m_outputShape[new_idx] = 1;
m_outputShape[new_idx+1] = shapeIn[i];
new_idx+=2;
} else if (!m_shrink_axis_mask_set.count(i)) {
// deal with begin_mask and end_mask
if ((i >= shapeBegin[0]) || (shapeIn[i] == 0)) {
m_outputShape[new_idx] = shapeIn[i];
} else {
auto begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i];
auto end = m_end_mask_set.count(i) ? shapeIn[i] : endPtr[i];
m_outputShape[new_idx] = ov::op::slice::get_sliced_value(shapeIn[i], begin, end, stridePtr[i]);
}
new_idx += 1;
}
}
return {{m_outputShape}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return port_mask;
}
private:
VectorDims m_outputShape;
const std::unordered_set<int64_t> m_begin_mask_set;
const std::unordered_set<int64_t> m_end_mask_set;
const std::unordered_set<int64_t> m_new_axis_mask_set;
const std::unordered_set<int64_t> m_shrink_axis_mask_set;
};
class StridedSliceShapeInferFactory : public ShapeInferFactory {
public:
StridedSliceShapeInferFactory(const std::shared_ptr<ov::Node>& op)
: m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
if (const auto Slice_op = ov::as_type_ptr<const ov::op::v8::Slice>(m_op)) {
return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), port_mask);
} else if (const auto StridedSlice_op = ov::as_type_ptr<const ov::op::v1::StridedSlice>(m_op)) {
const auto& ellipsis_mask = StridedSlice_op->get_ellipsis_mask();
if (std::any_of(ellipsis_mask.begin(), ellipsis_mask.end(), [](int64_t x){ return x == 1; })) {
return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), port_mask);
} else {
auto vec_to_set = [](const std::vector<int64_t>& vec){
std::unordered_set<int64_t> to_set;
for (size_t i = 0; i < vec.size(); ++i) {
if (vec[i] == 1) {
to_set.emplace(i);
}
}
return to_set;
};
return std::make_shared<StridedSliceShapeInfer>(
m_op->get_output_partial_shape(0).rank().get_length(),
vec_to_set(StridedSlice_op->get_begin_mask()),
vec_to_set(StridedSlice_op->get_end_mask()),
vec_to_set(StridedSlice_op->get_new_axis_mask()),
vec_to_set(StridedSlice_op->get_shrink_axis_mask()));
}
} else {
IE_THROW(NotImplemented) << "not Slice or StridedSlice";
}
}
private:
const std::shared_ptr<ov::Node> m_op;
};
} // namespace
StridedSlice::StridedSlice(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
Node(op, context, StridedSliceShapeInferFactory(op)) {
std::string errorMessage;

View File

@ -33,6 +33,7 @@
#include "transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.hpp"
#include "transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp"
#include "transformations/defs.hpp"
#include "shape_inference/custom/subgraph.hpp"
using namespace InferenceEngine;
using namespace dnnl::impl::utils;
@ -43,38 +44,6 @@ using namespace Xbyak;
namespace ov {
namespace intel_cpu {
namespace node {
namespace {
/* This class implementation is a temporary workaround.
   TODO: revise the implementation to remove the node reference */
class SnippetShapeInfer : public ShapeInferEmptyPads {
public:
SnippetShapeInfer(Snippet* node) : m_node(node) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
return {m_node->shapeInfer(), ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
Snippet* m_node;
};
class SnippetShapeInferFactory : public ShapeInferFactory {
public:
SnippetShapeInferFactory(Snippet* node) : m_node(node) {}
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<SnippetShapeInfer>(m_node);
}
private:
Snippet* m_node;
};
} // namespace
Snippet::Snippet(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, SnippetShapeInferFactory(this)) {

View File

@ -14,7 +14,7 @@
#include "transformations/utils/utils.hpp"
#include "common/cpu_memcpy.h"
#include "common/reorder_prim.h"
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
using namespace dnnl;
using namespace InferenceEngine;

View File

@ -10,7 +10,7 @@
#include <string>
#include <dnnl_extension_utils.h>
#include <common/primitive_hashing_utils.hpp>
#include "shape_inference/custom/transpose.hpp"
using namespace dnnl;
using namespace InferenceEngine;
@ -37,70 +37,6 @@ bool Transpose::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
return true;
}
namespace {
class TransposeDynShapeInfer : public ShapeInferEmptyPads {
public:
TransposeDynShapeInfer() = default;
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
IE_THROW(NotImplemented) << "TODO: Support parameterized Order input for dynamic shapes.";
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
};
class TransposeShapeInfer : public ShapeInferEmptyPads {
public:
TransposeShapeInfer(const size_t& out_rank, const std::vector<size_t>& axes_vec)
: m_out_rank(out_rank), m_axes_vec(axes_vec), m_outputShape(out_rank, 1), m_needReverse(axes_vec.empty()) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
const VectorDims& shapeIn = input_shapes[0].get();
if (m_needReverse) {
for (size_t i = 0; i < m_out_rank; ++i) {
m_outputShape[i] = shapeIn[m_out_rank - 1 - i];
}
} else {
for (size_t i = 0; i < m_out_rank; ++i) {
m_outputShape[i] = shapeIn[m_axes_vec[i]];
}
}
return {{m_outputShape}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
const size_t m_out_rank;
const std::vector<size_t> m_axes_vec;
VectorDims m_outputShape;
const bool m_needReverse;
};
class TransposeShapeInferFactory : public ShapeInferFactory {
public:
TransposeShapeInferFactory(const std::shared_ptr<ov::Node>& op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
if (const auto order = ov::as_type_ptr<const ov::op::v0::Constant>(m_op->get_input_node_shared_ptr(ov::op::v1::Transpose::ORDER))) {
const auto axes_vec = order->cast_vector<size_t>();
return std::make_shared<TransposeShapeInfer>(m_op->get_output_partial_shape(0).rank().get_length(), axes_vec);
} else {
return std::make_shared<TransposeDynShapeInfer>();
}
}
private:
const std::shared_ptr<ov::Node> m_op;
};
} // namespace
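A minimal standalone sketch of the TransposeShapeInfer rule above (a hypothetical helper on plain std::vector<size_t>; an empty order reproduces the "reverse" fallback):
#include <vector>

std::vector<size_t> transpose_out_shape(const std::vector<size_t>& in, const std::vector<size_t>& order) {
    std::vector<size_t> out(in.size());
    for (size_t i = 0; i < in.size(); ++i)
        out[i] = order.empty() ? in[in.size() - 1 - i]   // no order given: reverse the dims
                               : in[order[i]];           // otherwise permute by the order values
    return out;  // e.g. in {2, 3, 4}, order {2, 0, 1} -> {4, 2, 3}
}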
Transpose::Transpose(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, TransposeShapeInferFactory(op)) {
std::string errorMessage;

View File

@ -7,7 +7,7 @@
#include "ie_parallel.hpp"
#include <openvino/op/unique.hpp>
#include "common/cpu_memcpy.h"
#include <utils/shape_inference/shape_inference_internal_dyn.hpp>
#include <shape_inference/shape_inference_internal_dyn.hpp>
using namespace InferenceEngine;
using namespace ov::intel_cpu;

View File

@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "adaptive_pooling.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
/**
* Implements Adaptive Pooling shape inference algorithm. The output tensor shape consists of the input [N, C] dimensions and
* the [D_out, H_out, W_out] dimensions, which are placed in the second input parameter.
*
*/
Result AdaptivePoolingShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
const auto& inputDims = input_shapes[0].get();
const auto& spatialDims = input_shapes[1].get();
const auto inputRank = inputDims.size();
const auto spatialDimsSize = spatialDims[0];
VectorDims outputDims(inputRank);
outputDims[0] = inputDims[0];
outputDims[1] = inputDims[1];
auto newSpatialDimsPtr = reinterpret_cast<int32_t *>(data_dependency.at(1)->getData());
for (size_t i = 0; i < spatialDimsSize; i++) {
outputDims[i + 2] = newSpatialDimsPtr[i];
}
std::vector<VectorDims> result(m_outputs_count, outputDims);
return {std::move(result), ShapeInferStatus::success};
}
ShapeInferPtr AdaptivePoolingShapeInferFactory::makeShapeInfer() const {
size_t outputs_count = m_op->get_output_size();
return std::make_shared<AdaptivePoolingShapeInfer>(outputs_count);
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
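A minimal standalone sketch of the rule implemented above (a hypothetical helper on plain std::vector<size_t> instead of VectorDims; the spatial values stand for what the plugin reads from the second input's memory):
#include <cstdint>
#include <vector>

std::vector<size_t> adaptive_pooling_out_shape(const std::vector<size_t>& input_dims,
                                               const std::vector<int32_t>& new_spatial_dims) {
    std::vector<size_t> out(input_dims.size());
    out[0] = input_dims[0];  // N
    out[1] = input_dims[1];  // C
    for (size_t i = 0; i < new_spatial_dims.size(); ++i)
        out[i + 2] = static_cast<size_t>(new_spatial_dims[i]);  // D_out / H_out / W_out
    return out;  // e.g. input {1, 3, 32, 32, 32}, spatial {8, 8, 8} -> {1, 3, 8, 8, 8}
}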

View File

@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/**
* Implements Adaptive Pooling shape inference algorithm. The output tensor shape consists of the input [N, C] dimensions and
* the [D_out, H_out, W_out] dimensions, which are placed in the second input parameter.
*
*/
class AdaptivePoolingShapeInfer : public ShapeInferEmptyPads {
public:
explicit AdaptivePoolingShapeInfer(size_t outputs_count) : m_outputs_count(outputs_count) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(1);
}
private:
size_t m_outputs_count;
};
class AdaptivePoolingShapeInferFactory : public ShapeInferFactory {
public:
AdaptivePoolingShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,38 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "nodes/color_convert.h"
#include "color_convert.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
/**
 * Implements Color Convert shape inference algorithm. If the node has only a single plane, the H dimension is
 * recalculated as 2/3 of the initial size; otherwise it is passed through unchanged.
*
*/
Result ColorConvertShapeInfer::infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
const auto& dims = input_shapes.front().get();
if (dims.size() != 4) {
OPENVINO_THROW("NV12Converter node has incorrect input dimensions");
}
return { m_singlePlain
? std::vector<VectorDims>{ { dims[ColorConvert::Converter::N_DIM], dims[ColorConvert::Converter::H_DIM] * 2 / 3, dims[ColorConvert::Converter::W_DIM], 3 } }
:
std::vector<VectorDims>{ { dims[ColorConvert::Converter::N_DIM], dims[ColorConvert::Converter::H_DIM], dims[ColorConvert::Converter::W_DIM], 3 } },
ShapeInferStatus::success };
}
ShapeInferPtr ColorConvertShapeInferFactory::makeShapeInfer() const {
bool isSinglePlain = m_op->get_input_size() == 1;
return std::make_shared<ColorConvertShapeInfer>(isSinglePlain);
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
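A minimal standalone sketch of the rule implemented above (a hypothetical helper assuming NHWC dimension order, on plain std::vector<size_t> instead of VectorDims):
#include <vector>

std::vector<size_t> color_convert_out_shape(const std::vector<size_t>& dims, bool single_plane) {
    // dims = {N, H, W, C}; the output always has 3 channels
    const size_t h_out = single_plane ? dims[1] * 2 / 3   // single plane carries Y and UV in H
                                      : dims[1];          // multi-plane: H passes through
    return {dims[0], h_out, dims[2], 3};
    // e.g. single-plane {1, 720, 640, 1} -> {1, 480, 640, 3}
}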

View File

@ -0,0 +1,44 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/**
 * Implements Color Convert shape inference algorithm. If the node has only a single plane, the H dimension is
 * recalculated as 2/3 of the initial size; otherwise it is passed through unchanged.
*
*/
class ColorConvertShapeInfer : public ShapeInferEmptyPads {
public:
ColorConvertShapeInfer(bool singlePlain) : m_singlePlain(singlePlain) {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
bool m_singlePlain = false;
};
class ColorConvertShapeInferFactory : public ShapeInferFactory {
public:
ColorConvertShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,57 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "eltwise.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
/**
* Implements Eltwise shape inference algorithm. The algorithm is based on broadcasting all the input shapes
* according to the NUMPY broadcast rule. This implementation is more lightweight than the ngraph one.
*
*/
Result EltwiseShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
size_t max_rank = 0;
size_t max_rank_idx = 0;
for (size_t i = 0; i < input_shapes.size(); ++i) {
auto item_rank = input_shapes[i].get().size();
if (item_rank > max_rank) {
max_rank = item_rank;
max_rank_idx = i;
}
}
auto output_shape = input_shapes[max_rank_idx].get();
// use NUMPY broadcast rule
for (size_t i = 0; i < input_shapes.size(); i++) {
if (i == max_rank_idx)
continue;
auto& input_shape = input_shapes[i].get();
if (input_shape.size() > output_shape.size()) {
OPENVINO_THROW("Eltwise shape infer input and output shapes rank mismatch");
}
size_t offset = output_shape.size() - input_shape.size();
for (size_t j = 0; j < input_shape.size(); ++j) {
if (input_shape[j] != output_shape[offset + j]) {
if (output_shape[offset + j] == 1) {
output_shape[offset + j] = input_shape[j];
} else {
if (input_shape[j] != 1) OPENVINO_THROW("Eltwise shape infer input shapes dim index: ", j, " mismatch");
}
}
}
}
return { { std::move(output_shape) }, ShapeInferStatus::success };
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
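A minimal standalone sketch of the NUMPY broadcasting rule implemented above (a hypothetical helper on plain std::vector<size_t>, not the plugin API):
#include <stdexcept>
#include <vector>

std::vector<size_t> eltwise_broadcast(const std::vector<std::vector<size_t>>& shapes) {
    // start from the highest-rank input shape
    size_t max_idx = 0;
    for (size_t i = 1; i < shapes.size(); ++i)
        if (shapes[i].size() > shapes[max_idx].size())
            max_idx = i;
    std::vector<size_t> out = shapes[max_idx];
    // merge every other shape into it, right-aligned
    for (size_t i = 0; i < shapes.size(); ++i) {
        if (i == max_idx)
            continue;
        const auto& in = shapes[i];
        const size_t offset = out.size() - in.size();
        for (size_t j = 0; j < in.size(); ++j) {
            if (in[j] == out[offset + j])
                continue;
            if (out[offset + j] == 1)
                out[offset + j] = in[j];
            else if (in[j] != 1)
                throw std::runtime_error("incompatible dims");
        }
    }
    return out;  // e.g. {8, 1, 6, 1} and {7, 1, 5} -> {8, 7, 6, 5}
}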

View File

@ -0,0 +1,38 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/**
* Implements Eltwise shape inference algorithm. The algorithm is based on broadcasting all the input shapes
* according to the NUMPY broadcast rule. This implementation is more lightweight than the ngraph one.
*
*/
class EltwiseShapeInfer : public ShapeInferEmptyPads {
public:
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
};
class EltwiseShapeInferFactory : public ShapeInferFactory {
public:
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<EltwiseShapeInfer>();
}
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "fullyconnected.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
Result FCShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
const VectorDims& activationShape = input_shapes[0].get();
const VectorDims& weightShape = input_shapes[1].get();
size_t activationRank = activationShape.size();
size_t channelRank = weightShape.size() - 1;
// activation weight output_shape
// NCHW CoCHW NCo
// TNC CoC TNCo
// NC CoC NCo
VectorDims outputShape(out_rank, 1);
// set Co
outputShape.back() = weightShape[0];
// set batch dims
size_t batchRank = activationRank - channelRank;
size_t startIdx = out_rank - batchRank - 1;
for (size_t i = 0; i < batchRank; i++) {
outputShape[i + startIdx] = activationShape[i];
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
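A minimal standalone sketch of the FC rule implemented above (a hypothetical helper on plain std::vector<size_t>; the comment table maps e.g. TNC x CoC to TNCo):
#include <vector>

std::vector<size_t> fc_out_shape(const std::vector<size_t>& activation,
                                 const std::vector<size_t>& weight,
                                 size_t out_rank) {
    std::vector<size_t> out(out_rank, 1);
    out.back() = weight[0];                              // Co, taken from the weight shape
    const size_t channel_rank = weight.size() - 1;
    const size_t batch_rank = activation.size() - channel_rank;
    const size_t start = out_rank - batch_rank - 1;
    for (size_t i = 0; i < batch_rank; ++i)
        out[start + i] = activation[i];                  // copy the batch dims
    return out;  // e.g. activation {2, 3, 16}, weight {32, 16}, out_rank 3 -> {2, 3, 32}
}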

View File

@ -0,0 +1,42 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
class FCShapeInfer : public ShapeInferEmptyPads {
public:
FCShapeInfer(size_t outPut_rank) : out_rank(outPut_rank) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
size_t out_rank = 0;
};
class FCShapeInferFactory : public ShapeInferFactory {
public:
FCShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<FCShapeInfer>(m_op->get_output_partial_shape(0).rank().get_length());
}
private:
std::shared_ptr<const ngraph::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,60 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gather.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
Result GatherShapeInfer::infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
static constexpr size_t GATHER_DATA = 0, GATHER_INDICES = 1, GATHER_AXIS = 2;
const auto& input_shape = input_shapes[GATHER_DATA].get();
// Use VectorDims{} instead of {1} for Scalar
const auto& indices_shape = m_isIndicesScalar ? VectorDims{} :
input_shapes[GATHER_INDICES].get();
if (!m_isAxisInputConst) {
if (data_dependency.at(GATHER_AXIS)->getDesc().getPrecision() != Precision::I32) {
OPENVINO_THROW("Unsupported precision ", data_dependency.at(GATHER_AXIS)->getDesc().getPrecision(),
" for axis tensor.");
}
m_axis = reinterpret_cast<const int32_t*>(data_dependency.at(GATHER_AXIS)->getData())[0];
}
if (m_axis < 0) {
m_axis += input_shape.size();
}
if (m_batchDims < 0) {
m_batchDims += indices_shape.size();
}
VectorDims output_shape;
output_shape.reserve(input_shape.size() + indices_shape.size() - m_batchDims - 1);
output_shape.insert(output_shape.end(), input_shape.begin(), input_shape.begin() + m_axis);
output_shape.insert(output_shape.end(), indices_shape.begin() + m_batchDims, indices_shape.end());
output_shape.insert(output_shape.end(), input_shape.begin() + m_axis + 1, input_shape.end());
return {{std::move(output_shape)}, ShapeInferStatus::success};
}
ShapeInferPtr GatherShapeInferFactory::makeShapeInfer() const {
static constexpr size_t GATHER_INDICES = 1, GATHER_AXIS = 2;
bool isAxisInputConst = ov::is_type<ov::op::v0::Constant>(m_op->get_input_node_ptr(GATHER_AXIS));
const auto& indicesShape = m_op->get_input_partial_shape(GATHER_INDICES);
if (!indicesShape.rank().is_static()) {
OPENVINO_THROW("indicesShape do not support dynamic rank.");
}
bool isIndicesScalar = indicesShape.rank().get_length() == 0;
int axis = isAxisInputConst ? ov::as_type<ov::op::v0::Constant>(m_op->get_input_node_ptr(
GATHER_AXIS))->cast_vector<int>()[0] : 0;
int batchDims = ov::is_type<ov::op::v8::Gather>(m_op) ? static_cast<int>(ov::as_type_ptr<ov::op::v8::Gather>
(m_op)->get_batch_dims()) : (
ov::is_type<ov::op::v7::Gather>(m_op) ? static_cast<int>(ov::as_type_ptr<ov::op::v7::Gather>
(m_op)->get_batch_dims()) : 0);
return std::make_shared<GatherShapeInfer>(isAxisInputConst, isIndicesScalar, axis, batchDims);
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
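The output-shape composition performed above can be summarized by this standalone sketch; gather_out_shape and the sample dims are illustrative only.
#include <iostream>
#include <vector>

// Gather output shape: data[:axis] ++ indices[batch_dims:] ++ data[axis+1:].
std::vector<size_t> gather_out_shape(const std::vector<size_t>& data,
                                     const std::vector<size_t>& indices,
                                     int axis, int batch_dims) {
    if (axis < 0) axis += static_cast<int>(data.size());
    if (batch_dims < 0) batch_dims += static_cast<int>(indices.size());
    std::vector<size_t> out;
    out.insert(out.end(), data.begin(), data.begin() + axis);
    out.insert(out.end(), indices.begin() + batch_dims, indices.end());
    out.insert(out.end(), data.begin() + axis + 1, data.end());
    return out;
}

int main() {
    // data {2, 5, 7}, indices {2, 3}, axis 1, batch_dims 1 -> {2, 3, 7}
    for (auto d : gather_out_shape({2, 5, 7}, {2, 3}, 1, 1))
        std::cout << d << ' ';
    std::cout << '\n';
}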

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
class GatherShapeInfer : public ShapeInferEmptyPads {
public:
GatherShapeInfer(bool isAxisInputConst, bool isIndicesScalar, int axis, int batchDims) : m_isAxisInputConst(isAxisInputConst),
m_isIndicesScalar(isIndicesScalar), m_axis(axis), m_batchDims(batchDims) {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(2);
}
private:
bool m_isAxisInputConst = false;
bool m_isIndicesScalar = false;
int m_axis = 0;
int m_batchDims = 0;
};
class GatherShapeInferFactory : public ShapeInferFactory {
public:
GatherShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,64 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "matmul.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
#include <ngraph/opsets/opset1.hpp>
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
Result MMShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
const VectorDims& shapeA = input_shapes[0].get();
const VectorDims& shapeB = input_shapes[1].get();
const size_t rankA = shapeA.size();
const size_t rankB = shapeB.size();
// getSupportedDescriptors has already performed some shape checks, so here:
// 1. No need to reject scalar inputs: matmul_shape_inference has already checked them.
// 2. No need to check the compatibility of the last two dims.
// 3. The 1-D x 1-D case still has to be handled.
// 4. The transpose flags still have to be applied.
// 5. Only inputs of the same rank are supported.
// 6. The broadcast check is simplified.
if (rankA == 1 && rankB == 1 && shapeA[0] == shapeB[0]) {
return {{m_shapeY}, ShapeInferStatus::success};
}
m_shapeY[m_out_rank-2] = m_transpose_a ? shapeA[rankA-1] : shapeA[rankA-2];
m_shapeY[m_out_rank-1] = m_transpose_b ? shapeB[rankB-2] : shapeB[rankB-1];
for (size_t i=0; i < m_out_rank-2; ++i) {
if (shapeA[i] != shapeB[i]) {
if (shapeB[i] == 1) {
m_shapeY[i] = shapeA[i];
continue;
} else if (shapeA[i] != 1) {
OPENVINO_THROW("Incompatible MatMul batch dimension. Cant merge the first input dimension=",
shapeA[i], " with second input dimension=", shapeB[i], " at index=", i);
}
}
m_shapeY[i] = shapeB[i];
}
return {{m_shapeY}, ShapeInferStatus::success};
}
ShapeInferPtr MMShapeInferFactory::makeShapeInfer() const {
if (const auto matmul = ov::as_type_ptr<const ngraph::opset1::MatMul>(m_op)) {
const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length();
const bool transpose_a = matmul->get_transpose_a();
const bool transpose_b = matmul->get_transpose_b();
return std::make_shared<MMShapeInfer>(output_rank, transpose_a, transpose_b);
} else {
OPENVINO_THROW("Unexpected operation type in the MatMul shape inference factory");
}
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
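A simplified standalone sketch of the same rule for same-rank inputs (error handling omitted); matmul_out_shape and the sample dims are illustrative only.
#include <iostream>
#include <vector>

// The two innermost dims come from A/B (respecting the transpose flags) and the
// batch dims are merged with the simplified broadcast used by MMShapeInfer::infer.
std::vector<size_t> matmul_out_shape(const std::vector<size_t>& a,
                                     const std::vector<size_t>& b,
                                     bool transpose_a, bool transpose_b) {
    const size_t rank = a.size();              // assumes a.size() == b.size() >= 2
    std::vector<size_t> y(rank, 1);
    y[rank - 2] = transpose_a ? a[rank - 1] : a[rank - 2];
    y[rank - 1] = transpose_b ? b[rank - 2] : b[rank - 1];
    for (size_t i = 0; i + 2 < rank; ++i)
        y[i] = (b[i] == 1) ? a[i] : b[i];      // merge batch dims, a dim of 1 broadcasts
    return y;
}

int main() {
    // A {2, 1, 3, 4} x B {1, 5, 4, 6} -> {2, 5, 3, 6}
    for (auto d : matmul_out_shape({2, 1, 3, 4}, {1, 5, 4, 6}, false, false))
        std::cout << d << ' ';
    std::cout << '\n';
}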

View File

@ -0,0 +1,44 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
class MMShapeInfer : public ShapeInferEmptyPads {
public:
MMShapeInfer(const size_t& out_rank, const bool& transpose_a, const bool& transpose_b) :
m_out_rank(out_rank), m_transpose_a(transpose_a), m_transpose_b(transpose_b) {
m_shapeY = VectorDims(m_out_rank, 1); // for output and cache
}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
VectorDims m_shapeY;
const size_t m_out_rank;
const bool m_transpose_a;
const bool m_transpose_b;
};
class MMShapeInferFactory : public ShapeInferFactory {
public:
MMShapeInferFactory(const std::shared_ptr<ngraph::Node>& op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ngraph::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,31 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/cpu_opset/common/op/ngram.hpp"
#include "ngram.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
Result NgramShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
auto output_shape = input_shapes[0].get();
output_shape[1] *= m_k;
return {{std::move(output_shape)}, ShapeInferStatus::success};
}
ShapeInferPtr NgramShapeInferFactory::makeShapeInfer() const {
auto ngram = ov::as_type_ptr<NgramNode>(m_op);
if (!ngram) {
OPENVINO_THROW("Wrong operation type");
}
return std::make_shared<NgramShapeInfer>(ngram->get_k());
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
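A quick worked example of the rule above, with hypothetical numbers only:
#include <iostream>
#include <vector>

int main() {
    // NgramShapeInfer scales the second dimension of the first input shape by k.
    std::vector<size_t> shape{8, 16};   // hypothetical input shape
    const size_t k = 3;                 // hypothetical k attribute
    shape[1] *= k;                      // {8, 16} -> {8, 48}
    std::cout << shape[0] << ' ' << shape[1] << '\n';
}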

View File

@ -0,0 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
class NgramShapeInfer : public ShapeInferEmptyPads {
public:
NgramShapeInfer(const size_t k) : m_k(k) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
size_t m_k;
};
class NgramShapeInferFactory : public ShapeInferFactory {
public:
NgramShapeInferFactory(const std::shared_ptr<ov::Node>& op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,47 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "one_hot.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
#include <ngraph/opsets/opset1.hpp>
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
/**
* Implements One Hot shape inference algorithm. The output shape is the input `indices` tensor shape, where a new axis
* of size `depth` is inserted at the dimension defined by the `axis` parameter.
*
*/
Result OneHotShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
auto depth = reinterpret_cast<int32_t *>(data_dependency.at(1)->getData())[0];
auto result = input_shapes.front().get();
result.insert(result.begin() + m_axis, depth);
return {{std::move(result)}, ShapeInferStatus::success};
}
ShapeInferPtr OneHotShapeInferFactory::makeShapeInfer() const {
auto oneHot = ov::as_type_ptr<const ngraph::opset1::OneHot>(m_op);
if (!oneHot) {
OPENVINO_THROW("Unexpected op type in OneHot shape inference factory: ", m_op->get_type_name());
}
auto axis = oneHot->get_axis();
auto dstShape = oneHot->get_output_partial_shape(0);
int output_dims_size = dstShape.size();
if (0 == output_dims_size) output_dims_size = 1;
if (axis < 0) {
axis += output_dims_size;
}
return std::make_shared<OneHotShapeInfer>(axis);
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
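A tiny standalone sketch of the axis insertion above, with hypothetical numbers only:
#include <iostream>
#include <vector>

int main() {
    // OneHot output shape: insert `depth` at position `axis` of the indices shape.
    std::vector<size_t> shape{4, 5};
    const size_t axis = 1, depth = 10;
    shape.insert(shape.begin() + axis, depth);   // {4, 5} -> {4, 10, 5}
    for (auto d : shape) std::cout << d << ' ';
    std::cout << '\n';
}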

View File

@ -0,0 +1,45 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/**
* Implements One Hot shape inference algorithm. The output shape is the input `indices` tensor shape, where a new axis
* of size `depth` is inserted at the dimension defined by the `axis` parameter.
*
*/
class OneHotShapeInfer : public ShapeInferEmptyPads {
public:
explicit OneHotShapeInfer(int64_t axis) : m_axis(axis) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(1);
}
private:
int64_t m_axis = 0;
};
class OneHotShapeInferFactory : public ShapeInferFactory {
public:
OneHotShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,40 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "priorbox.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
#include <ngraph/opsets/opset1.hpp>
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
/**
* Implements the PriorBox shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
* `number_of_priors` is an attribute of the operation; height and width are taken from the first input parameter.
*
*/
Result PriorBoxShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
const int* in_data = reinterpret_cast<const int*>(data_dependency.at(0)->getData());
const int H = in_data[0];
const int W = in_data[1];
const auto output = static_cast<size_t>(4 * H * W * m_number_of_priors);
return {{{2, output}}, ShapeInferStatus::success};
}
ShapeInferPtr PriorBoxShapeInferFactory::makeShapeInfer() const {
auto priorBox = ov::as_type_ptr<const ngraph::opset1::PriorBox>(m_op);
if (!priorBox) {
OPENVINO_THROW("Unexpected op type in PriorBox shape inference factory: ", m_op->get_type_name());
}
const auto& attrs = priorBox->get_attrs();
auto number_of_priors = ngraph::opset1::PriorBox::number_of_priors(attrs);
return std::make_shared<PriorBoxShapeInfer>(number_of_priors);
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
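A quick worked example of the output-shape arithmetic above, with hypothetical numbers:
#include <cstddef>
#include <iostream>

int main() {
    // H = 19, W = 19, number_of_priors = 6 -> [2, 4 * 19 * 19 * 6] = [2, 8664]
    const std::size_t H = 19, W = 19, number_of_priors = 6;
    const std::size_t second_dim = 4 * H * W * number_of_priors;
    std::cout << "[2, " << second_dim << "]\n";
}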

View File

@ -0,0 +1,45 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
class PriorBoxShapeInfer : public ShapeInferEmptyPads {
/**
* Implements the PriorBox shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
* `number_of_priors` is an attribute of the operation; height and width are taken from the first input parameter.
*
*/
public:
explicit PriorBoxShapeInfer(int64_t number_of_priors) : m_number_of_priors(number_of_priors) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(0);
}
private:
int64_t m_number_of_priors = 0;
};
class PriorBoxShapeInferFactory : public ShapeInferFactory {
public:
explicit PriorBoxShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,42 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "priorbox_clustered.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
#include <ngraph/opsets/opset1.hpp>
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
/**
* Implements the PriorBoxClustered shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
* `number_of_priors` is an attribute of the operation; height and width are taken from the first input parameter.
*
*/
Result PriorBoxClusteredShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
const int* in_data = reinterpret_cast<const int*>(data_dependency.at(0)->getData());
const int H = in_data[0];
const int W = in_data[1];
const auto output = static_cast<size_t>(4 * H * W * m_number_of_priors);
return {{{2, output}}, ShapeInferStatus::success};
}
ShapeInferPtr PriorBoxClusteredShapeInferFactory::makeShapeInfer() const {
auto priorBox = ov::as_type_ptr<const ngraph::opset1::PriorBoxClustered>(m_op);
if (!priorBox) {
OPENVINO_THROW("Unexpected op type in PriorBoxClustered shape inference factory: ", m_op->get_type_name());
}
const auto& attrs = priorBox->get_attrs();
auto number_of_priors = attrs.widths.size();
return std::make_shared<PriorBoxClusteredShapeInfer>(number_of_priors);
}
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/**
* Implements the PriorBoxClustered shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
* `number_of_priors` is an attribute of the operation; height and width are taken from the first input parameter.
*
*/
class PriorBoxClusteredShapeInfer : public ShapeInferEmptyPads {
public:
explicit PriorBoxClusteredShapeInfer(size_t number_of_priors) : m_number_of_priors(number_of_priors) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(0);
}
private:
size_t m_number_of_priors = 0;
};
class PriorBoxClusteredShapeInferFactory : public ShapeInferFactory {
public:
explicit PriorBoxClusteredShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,193 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reshape.hpp"
#include <vector>
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
#include <utils/general_utils.h>
namespace ov {
namespace intel_cpu {
namespace node {
Result ReshapeShapeInfer::infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
static constexpr size_t RESHAPE_SRC = 0, RESHAPE_PATTERN = 1;
const auto& inputShape = input_shapes[RESHAPE_SRC].get();
const size_t inputShapeSize = inputShape.size();
const auto memPtr = data_dependency.at(RESHAPE_PATTERN);
const auto data = memPtr->getData();
const auto& dims = memPtr->getStaticDims();
const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<Dim>());
std::vector<int64_t> outPattern = ov::get_raw_data_as<int64_t>(
InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()),
data,
outputPatternSize,
ov::util::Cast<int64_t>());
VectorDims outputShape(outputPatternSize);
size_t outputProduct = 1;
int32_t minusOneIdx = -1;
int32_t minusOneCount = 0;
for (int32_t i = 0; i < outputPatternSize; ++i) {
if (outPattern[i] == 0 && m_specialZero && i < static_cast<int32_t>(inputShapeSize)) {
outputShape[i] = inputShape[i];
} else if (outPattern[i] == -1) {
minusOneIdx = i;
minusOneCount++;
} else {
outputShape[i] = outPattern[i];
outputProduct *= outputShape[i];
}
}
size_t inputProduct = 1;
for (size_t i = 0; i < inputShapeSize; ++i) {
if (static_cast<int>(i) < outputPatternSize && outPattern[i] == 0 && m_specialZero) {
continue;
}
inputProduct *= inputShape[i];
}
if (minusOneIdx >= 0) {
if (outputProduct != 0) {
outputShape[minusOneIdx] = inputProduct / outputProduct;
outputProduct *= outputShape[minusOneIdx];
} else {
outputShape[minusOneIdx] = 0;
}
}
if (minusOneCount > 1 || inputProduct != outputProduct) {
OPENVINO_THROW("[cpu]reshape: the shape of input data ", ov::intel_cpu::vec2str(inputShape),
" conflicts with the reshape pattern ", ov::intel_cpu::vec2str(outPattern));
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
Result SqueezeShapeInfer::infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
static constexpr size_t SQUEEZE_SRC = 0, SQUEEZE_PATTERN = 1;
const auto& inputShape = input_shapes[SQUEEZE_SRC].get();
const size_t inputShapeSize = inputShape.size();
auto itr = data_dependency.find(SQUEEZE_PATTERN);
VectorDims outputShape;
outputShape.reserve(inputShapeSize);
if (itr != data_dependency.end()) {
const auto memPtr = data_dependency.at(SQUEEZE_PATTERN);
const auto data = memPtr->getData();
const auto& dims = memPtr->getStaticDims();
if (dims.size() != 0) {
const size_t outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<Dim>());
std::vector<int64_t> outPattern = ov::get_raw_data_as<int64_t>(
InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()),
data,
outputPatternSize,
ov::util::Cast<int64_t>());
std::vector<int64_t> originOutPattern = outPattern;
std::vector<bool> removeMask(inputShapeSize, false);
bool existError = false;
for (size_t i = 0; i < outputPatternSize; i++) {
if (outPattern[i] < 0) {
outPattern[i] = inputShapeSize + outPattern[i];
}
if (outPattern[i] >= 0 && outPattern[i] < static_cast<int64_t>(inputShapeSize)) {
removeMask[outPattern[i]] = true;
} else {
existError = true;
break;
}
}
for (size_t i = 0; i < inputShapeSize; i++) {
if (!removeMask[i]) {
outputShape.push_back(inputShape[i]);
} else if (inputShape[i] != 1) {
existError = true;
break;
}
}
if (existError) {
OPENVINO_THROW("[cpu]squeeze: the shape of input data ", ov::intel_cpu::vec2str(inputShape),
" conflicts with the squeeze pattern ", ov::intel_cpu::vec2str(originOutPattern));
}
} else {
for (size_t i = 0; i < inputShapeSize; i++) {
if (inputShape[i] != 1) {
outputShape.push_back(inputShape[i]);
}
}
}
} else {
for (size_t i = 0; i < inputShapeSize; i++) {
if (inputShape[i] != 1) {
outputShape.push_back(inputShape[i]);
}
}
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
Result UnsqueezeShapeInfer::infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
static constexpr size_t UNSQUEEZE_SRC = 0, UNSQUEEZE_PATTERN = 1;
const auto& inputShape = input_shapes[UNSQUEEZE_SRC].get();
const size_t inputShapeSize = inputShape.size();
const auto memPtr = data_dependency.at(UNSQUEEZE_PATTERN);
const auto data = memPtr->getData();
const auto& dims = memPtr->getStaticDims();
size_t outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<Dim>());
std::vector<int64_t> originOutPattern = ov::get_raw_data_as<int64_t>(
InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()),
data,
outputPatternSize,
ov::util::Cast<int64_t>());
// remove repeated pattern
std::unordered_set<int64_t> tmp(originOutPattern.begin(), originOutPattern.end());
std::vector<int64_t> outPattern = std::vector<int64_t>(tmp.begin(), tmp.end());
outputPatternSize = outPattern.size();
size_t outputShapeSize = inputShapeSize + outputPatternSize;
VectorDims outputShape(outputShapeSize, 0);
bool existError = false;
for (size_t i = 0; i < outputPatternSize; i++) {
if (outPattern[i] < 0) {
outPattern[i] = outputShapeSize + outPattern[i];
}
if (outPattern[i] >= 0 && outPattern[i] < static_cast<int64_t>(outputShapeSize)) {
outputShape[outPattern[i]] = 1;
} else {
existError = true;
break;
}
}
for (size_t i = 0, y = 0; i < outputShapeSize; i++) {
if (outputShape[i] == 0) {
if (y < inputShapeSize) {
outputShape[i] = inputShape[y];
y++;
} else {
existError = true;
break;
}
}
}
if (existError) {
OPENVINO_THROW("[cpu]unsqueeze: the shape of input data ", ov::intel_cpu::vec2str(inputShape),
" conflicts with the unsqueeze pattern ", ov::intel_cpu::vec2str(originOutPattern));
}
return {{std::move(outputShape)}, ShapeInferStatus::success};
}
ShapeInferPtr ReshapeShapeInferFactory::makeShapeInfer() const {
if (const auto reshapeOp = ov::as_type_ptr<const ov::op::v1::Reshape>(m_op)) {
return std::make_shared<ReshapeShapeInfer>(reshapeOp->get_special_zero());
} else if (ov::is_type<ov::op::v0::Squeeze>(m_op)) {
return std::make_shared<SqueezeShapeInfer>();
} else if (ov::is_type<ov::op::v0::Unsqueeze>(m_op)) {
return std::make_shared<UnsqueezeShapeInfer>();
} else {
OPENVINO_THROW("[cpu]reshape: ", m_op->get_type_name(), " is not implemented");
}
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
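A simplified standalone sketch of the Reshape pattern resolution above (no error checking); resolve_reshape and the sample dims are illustrative only.
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// A 0 copies the corresponding input dim (when special_zero is set) and a single
// -1 absorbs the remaining element count, as in ReshapeShapeInfer::infer.
std::vector<size_t> resolve_reshape(const std::vector<size_t>& in,
                                    const std::vector<int64_t>& pattern,
                                    bool special_zero) {
    std::vector<size_t> out(pattern.size(), 0);
    size_t out_product = 1;
    int minus_one_idx = -1;
    for (size_t i = 0; i < pattern.size(); ++i) {
        if (pattern[i] == 0 && special_zero && i < in.size()) {
            out[i] = in[i];
        } else if (pattern[i] == -1) {
            minus_one_idx = static_cast<int>(i);
            continue;                              // filled in below
        } else {
            out[i] = static_cast<size_t>(pattern[i]);
        }
        out_product *= out[i];
    }
    const size_t in_product =
        std::accumulate(in.begin(), in.end(), static_cast<size_t>(1), std::multiplies<size_t>());
    if (minus_one_idx >= 0)
        out[minus_one_idx] = in_product / out_product;
    return out;
}

int main() {
    // {2, 3, 4} reshaped with pattern {0, -1} and special_zero -> {2, 12}
    for (auto d : resolve_reshape({2, 3, 4}, {0, -1}, true))
        std::cout << d << ' ';
    std::cout << '\n';
}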

View File

@ -0,0 +1,57 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
class ReshapeShapeInfer : public ShapeInferEmptyPads {
public:
ReshapeShapeInfer(bool specialZero) : m_specialZero(specialZero) {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(1);
}
private:
bool m_specialZero;
};
class SqueezeShapeInfer : public ShapeInferEmptyPads {
public:
SqueezeShapeInfer() {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(1);
}
};
class UnsqueezeShapeInfer : public ShapeInferEmptyPads {
public:
UnsqueezeShapeInfer() {}
Result infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return PortMask(1);
}
};
class ReshapeShapeInferFactory : public ShapeInferFactory {
public:
ReshapeShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,44 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/**
* Implements the ShapeOf shape inference algorithm. The output is a 1D tensor whose length equals the rank of the
* input tensor.
*
*/
class ShapeOfShapeInfer : public ShapeInferEmptyPads {
public:
ShapeOfShapeInfer() = default;
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
IE_ASSERT(!input_shapes.empty());
return {{VectorDims{input_shapes.front().get().size()}}, ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
};
class ShapeOfShapeInferFactory : public ShapeInferFactory {
public:
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<ShapeOfShapeInfer>();
}
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,94 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "strided_slice.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
#include "slice_shape_inference.hpp"
#include <shape_inference/shape_inference_ngraph.hpp>
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
StridedSliceShapeInfer::StridedSliceShapeInfer(size_t output_size,
std::unordered_set<int64_t> begin_mask,
std::unordered_set<int64_t> end_mask,
std::unordered_set<int64_t> new_axis_mask,
std::unordered_set<int64_t> shrink_axis_mask)
: m_outputShape(output_size, 1),
m_begin_mask_set(std::move(begin_mask)),
m_end_mask_set(std::move(end_mask)),
m_new_axis_mask_set(std::move(new_axis_mask)),
m_shrink_axis_mask_set(std::move(shrink_axis_mask)) {}
Result StridedSliceShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
// align with intel_cpu::node::StridedSlice
static constexpr size_t DATA_ID = 0, BEGIN_ID = 1, END_ID = 2, STRIDE_ID = 3;
const VectorDims& shapeIn = input_shapes[DATA_ID].get();
const VectorDims& shapeBegin = input_shapes[BEGIN_ID].get();
if (data_dependency.at(BEGIN_ID)->getDesc().getPrecision() != Precision::I32 ||
data_dependency.at(END_ID)->getDesc().getPrecision() != Precision::I32 ||
data_dependency.at(STRIDE_ID)->getDesc().getPrecision() != Precision::I32) {
OPENVINO_THROW("The data type of begin/end/stride is NOT I32, which is unexpected!");
}
auto beginPtr = reinterpret_cast<int32_t *>(data_dependency.at(BEGIN_ID)->getData());
auto endPtr = reinterpret_cast<int32_t *>(data_dependency.at(END_ID)->getData());
auto stridePtr = reinterpret_cast<int32_t *>(data_dependency.at(STRIDE_ID)->getData());
for (size_t i = 0, new_idx = 0; i < shapeIn.size(); ++i) {
if (m_new_axis_mask_set.count(i)) {
// deal with new_axis_mask
m_outputShape[new_idx] = 1;
m_outputShape[new_idx+1] = shapeIn[i];
new_idx+=2;
} else if (!m_shrink_axis_mask_set.count(i)) {
// deal with begin_mask and end_mask
if ((i >= shapeBegin[0]) || (shapeIn[i] == 0)) {
m_outputShape[new_idx] = shapeIn[i];
} else {
auto begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i];
auto end = m_end_mask_set.count(i) ? shapeIn[i] : endPtr[i];
m_outputShape[new_idx] = ov::op::slice::get_sliced_value(shapeIn[i], begin, end, stridePtr[i]);
}
new_idx += 1;
}
}
return {{m_outputShape}, ShapeInferStatus::success};
}
ShapeInferPtr StridedSliceShapeInferFactory::makeShapeInfer() const {
if (const auto Slice_op = ov::as_type_ptr<const ov::op::v8::Slice>(m_op)) {
return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), port_mask);
} else if (const auto StridedSlice_op = ov::as_type_ptr<const ov::op::v1::StridedSlice>(m_op)) {
const auto& ellipsis_mask = StridedSlice_op->get_ellipsis_mask();
if (std::any_of(ellipsis_mask.begin(), ellipsis_mask.end(), [](int64_t x){ return x == 1; })) {
return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), port_mask);
} else {
auto vec_to_set = [](const std::vector<int64_t>& vec){
std::unordered_set<int64_t> to_set;
for (size_t i = 0; i < vec.size(); ++i) {
if (vec[i] == 1) {
to_set.emplace(i);
}
}
return to_set;
};
return std::make_shared<StridedSliceShapeInfer>(
m_op->get_output_partial_shape(0).rank().get_length(),
vec_to_set(StridedSlice_op->get_begin_mask()),
vec_to_set(StridedSlice_op->get_end_mask()),
vec_to_set(StridedSlice_op->get_new_axis_mask()),
vec_to_set(StridedSlice_op->get_shrink_axis_mask()));
}
} else {
OPENVINO_THROW("not Slice or StridedSlice");
}
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
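A rough standalone sketch of how a single sliced dimension is computed for positive strides (the real implementation delegates to ov::op::slice::get_sliced_value, which also covers negative strides); sliced_len and the numbers are illustrative only.
#include <algorithm>
#include <cstdint>
#include <iostream>

// Clamp the (possibly negative) begin/end to [0, dim] and count the strided elements.
int64_t sliced_len(int64_t dim, int64_t begin, int64_t end, int64_t stride) {
    auto norm = [dim](int64_t v) {
        return std::max<int64_t>(0, std::min(dim, v < 0 ? v + dim : v));
    };
    begin = norm(begin);
    end = norm(end);
    if (end <= begin)
        return 0;
    return (end - begin + stride - 1) / stride;   // ceil((end - begin) / stride)
}

int main() {
    // dim 10, slice [2:-1:2] -> elements 2, 4, 6, 8 -> length 4
    std::cout << sliced_len(10, 2, -1, 2) << '\n';
}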

View File

@ -0,0 +1,53 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
constexpr IShapeInfer::port_mask_t port_mask = PortMask(/*BEGIN_ID*/1, /*END_ID*/2, /*STRIDE_ID*/3, /*AXES_ID*/4);
class StridedSliceShapeInfer : public ShapeInferEmptyPads {
public:
StridedSliceShapeInfer(size_t output_size,
std::unordered_set<int64_t> begin_mask,
std::unordered_set<int64_t> end_mask,
std::unordered_set<int64_t> new_axis_mask,
std::unordered_set<int64_t> shrink_axis_mask);
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return port_mask;
}
private:
VectorDims m_outputShape;
const std::unordered_set<int64_t> m_begin_mask_set;
const std::unordered_set<int64_t> m_end_mask_set;
const std::unordered_set<int64_t> m_new_axis_mask_set;
const std::unordered_set<int64_t> m_shrink_axis_mask_set;
};
class StridedSliceShapeInferFactory : public ShapeInferFactory {
public:
StridedSliceShapeInferFactory(const std::shared_ptr<ov::Node>& op)
: m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
const std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,45 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
/* This class implementation is a temporary workaround.
TODO: revise the implementation to remove the node reference */
class SnippetShapeInfer : public ShapeInferEmptyPads {
public:
SnippetShapeInfer(Snippet* node) : m_node(node) {}
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
return {m_node->shapeInfer(), ShapeInferStatus::success};
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
Snippet* m_node;
};
class SnippetShapeInferFactory : public ShapeInferFactory {
public:
SnippetShapeInferFactory(Snippet* node) : m_node(node) {}
ShapeInferPtr makeShapeInfer() const override {
return std::make_shared<SnippetShapeInfer>(m_node);
}
private:
Snippet* m_node;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transpose.hpp"
#include "utils.hpp"
#include "ie_ngraph_utils.hpp"
namespace ov {
namespace intel_cpu {
namespace node {
using namespace InferenceEngine;
TransposeShapeInfer::TransposeShapeInfer(const size_t& out_rank, const std::vector<size_t>& axes_vec)
: m_out_rank(out_rank), m_axes_vec(axes_vec), m_outputShape(out_rank, 1), m_needReverse(axes_vec.empty()) {}
Result TransposeShapeInfer::infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
const VectorDims& shapeIn = input_shapes[0].get();
if (m_needReverse) {
for (size_t i = 0; i < m_out_rank; ++i) {
m_outputShape[i] = shapeIn[m_out_rank - 1 - i];
}
} else {
for (size_t i = 0; i < m_out_rank; ++i) {
m_outputShape[i] = shapeIn[m_axes_vec[i]];
}
}
return {{m_outputShape}, ShapeInferStatus::success};
}
ShapeInferPtr TransposeShapeInferFactory::makeShapeInfer() const {
if (const auto order = ov::as_type_ptr<const ov::op::v0::Constant>(m_op->get_input_node_shared_ptr(ov::op::v1::Transpose::ORDER))) {
const auto axes_vec = order->cast_vector<size_t>();
return std::make_shared<TransposeShapeInfer>(m_op->get_output_partial_shape(0).rank().get_length(), axes_vec);
} else {
return std::make_shared<TransposeDynShapeInfer>();
}
}
} // namespace node
} // namespace intel_cpu
} // namespace ov
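A tiny standalone sketch of the permutation above, with hypothetical numbers only:
#include <iostream>
#include <vector>

int main() {
    // Transpose output shape: out[i] = in[order[i]]; an empty order means reversing the dims.
    std::vector<size_t> in{2, 3, 4, 5};
    std::vector<size_t> order{0, 3, 1, 2};
    std::vector<size_t> out(in.size());
    for (size_t i = 0; i < in.size(); ++i)
        out[i] = in[order[i]];                 // -> {2, 5, 3, 4}
    for (auto d : out) std::cout << d << ' ';
    std::cout << '\n';
}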

View File

@ -0,0 +1,59 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <node.h>
#include "shape_inference/shape_inference_cpu.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace node {
using Result = IShapeInfer::Result;
class TransposeDynShapeInfer : public ShapeInferEmptyPads {
public:
TransposeDynShapeInfer() = default;
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override {
OPENVINO_THROW("TODO: Support parameterized Order input for dynamic shapes.");
}
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
};
class TransposeShapeInfer : public ShapeInferEmptyPads {
public:
TransposeShapeInfer(const size_t& out_rank, const std::vector<size_t>& axes_vec);
Result infer(
const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
port_mask_t get_port_mask() const override {
return EMPTY_PORT_MASK;
}
private:
const size_t m_out_rank;
const std::vector<size_t> m_axes_vec;
VectorDims m_outputShape;
const bool m_needReverse;
};
class TransposeShapeInferFactory : public ShapeInferFactory {
public:
TransposeShapeInferFactory(const std::shared_ptr<ov::Node>& op) : m_op(op) {}
ShapeInferPtr makeShapeInfer() const override;
private:
const std::shared_ptr<ov::Node> m_op;
};
} // namespace node
} // namespace intel_cpu
} // namespace ov

View File

@ -27,7 +27,7 @@ void ov::intel_cpu::NgramNode::validate_and_infer_types() {
NGRAPH_CHECK(m_k > 0, "k attribute must be greater than zero");
const auto& idces_et = get_input_element_type(1);
const auto& idces_shape = get_input_partial_shape(0);
const auto& idces_shape = get_input_partial_shape(1);
NGRAPH_CHECK(idces_shape.rank() == 2, "'batch_idces' input must have 2D shape whereas current shape is", idces_shape);
NGRAPH_CHECK(idces_et.is_integral_number(), "'batch_idces' input must be integer whereas current element type is", idces_et);

View File

@ -81,6 +81,21 @@ ie_faster_build(${TARGET_NAME}
UNITY
)
function(group_source_file GROUP_NAME GROUP_DIR)
file(GLOB GROUP_FILES ${GROUP_DIR}/*.cpp)
foreach(file ${GROUP_FILES})
set_source_files_properties(${file} PROPERTIES UNITY_GROUP ${GROUP_NAME})
endforeach()
endfunction()
if(ENABLE_FASTER_BUILD)
set_target_properties(${TARGET_NAME} PROPERTIES UNITY_BUILD ON UNITY_BUILD_MODE GROUP)
group_source_file(unit_src_nodes ${CMAKE_CURRENT_SOURCE_DIR}/nodes)
group_source_file(unit_src_snippets_transformations ${CMAKE_CURRENT_SOURCE_DIR}/snippets_transformations)
group_source_file(unit_src_ngraph_transformation ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations)
group_source_file(unit_src_custom_shape_infer ${CMAKE_CURRENT_SOURCE_DIR}/shape_inference_test/custom_shape_infer)
endif()
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE
$<TARGET_PROPERTY:dnnl,SOURCE_DIR>/src/common
$<TARGET_PROPERTY:dnnl,SOURCE_DIR>/src/cpu

View File

@ -7,8 +7,8 @@
#include "utils.hpp"
#include "openvino/op/ops.hpp"
#include "openvino/op/parameter.hpp"
#include "utils/shape_inference/shape_inference.hpp"
#include "utils/shape_inference/static_shape.hpp"
#include "shape_inference/shape_inference.hpp"
#include "shape_inference/static_shape.hpp"
using namespace ov;
using namespace ov::intel_cpu;

View File

@ -7,7 +7,7 @@
#include "openvino/op/parameter.hpp"
#include "openvino/pass/graph_rewrite.hpp"
#include "utils.hpp"
#include "utils/shape_inference/static_shape.hpp"
#include "shape_inference/static_shape.hpp"
using namespace ov;
using namespace ov::intel_cpu;

View File

@ -0,0 +1,77 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
using AdaptiveAvgPoolV8TestParams = std::tuple<unit_test::ShapeVector, // Input shapes
std::vector<int32_t>, // target spatial dims
StaticShape // Expected shape
>;
class AdaptiveAvgPoolV8CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v8::AdaptiveAvgPool>,
public WithParamInterface<AdaptiveAvgPoolV8TestParams> {
public:
static std::string getTestCaseName(const testing::TestParamInfo<AdaptiveAvgPoolV8TestParams>& obj) {
unit_test::ShapeVector tmp_input_shapes;
std::vector<int32_t> tmp_axes;
StaticShape tmp_exp_shape;
std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape) = obj.param;
std::ostringstream result;
result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
result << "sd" << CommonTestUtils::vec2str(tmp_axes) << "_";
result << "exp_shape" << tmp_exp_shape;
return result.str();
}
protected:
void SetUp() override {
std::tie(input_shapes, axes, exp_shape) = GetParam();
output_shapes = unit_test::ShapeVector(0);
output_shapes.push_back(exp_shape);
arg = std::make_shared<op::v0::Parameter>(element::f64, input_shapes.front().get_shape());
}
std::vector<int32_t> axes;
std::shared_ptr<op::v0::Parameter> arg;
bool specialZero;
};
TEST_P(AdaptiveAvgPoolV8CpuShapeInferenceTest , shape_inference_empty_const_map) {
const auto axes_node = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{axes.size()}, axes);
const auto op = make_op(arg, axes_node);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
}
TEST_P(AdaptiveAvgPoolV8CpuShapeInferenceTest , shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
AdaptiveAvgPoolV8CpuShapeInferenceTest ,
Values(make_tuple(unit_test::ShapeVector{{1, 3, 1, 2}, {2}}, std::vector<int32_t>{10, 20}, StaticShape({1, 3, 10, 20})),
make_tuple(unit_test::ShapeVector{{1, 2, 10}, {1}}, std::vector<int32_t>{17}, StaticShape({1, 2, 17}))),
AdaptiveAvgPoolV8CpuShapeInferenceTest::getTestCaseName);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,80 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
using AdaptiveMaxPoolV8TestParams = std::tuple<unit_test::ShapeVector, // Input shapes
std::vector<int32_t>, // target spatial dims
StaticShape // Expected shape
>;
class AdaptiveMaxPoolV8CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v8::AdaptiveMaxPool>,
public WithParamInterface<AdaptiveMaxPoolV8TestParams> {
public:
static std::string getTestCaseName(const testing::TestParamInfo<AdaptiveMaxPoolV8TestParams>& obj) {
unit_test::ShapeVector tmp_input_shapes;
std::vector<int32_t> tmp_axes;
StaticShape tmp_exp_shape;
std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape) = obj.param;
std::ostringstream result;
result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
result << "sd" << CommonTestUtils::vec2str(tmp_axes) << "_";
result << "exp_shape" << tmp_exp_shape;
return result.str();
}
protected:
void SetUp() override {
std::tie(input_shapes, axes, exp_shape) = GetParam();
output_shapes = unit_test::ShapeVector(0);
output_shapes.push_back(exp_shape);
output_shapes.push_back(exp_shape);
arg = std::make_shared<op::v0::Parameter>(element::f64, input_shapes.front().get_shape());
}
std::vector<int32_t> axes;
std::shared_ptr<op::v0::Parameter> arg;
bool specialZero;
};
TEST_P(AdaptiveMaxPoolV8CpuShapeInferenceTest , shape_inference_empty_const_map) {
const auto axes_node = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{axes.size()}, axes);
const auto op = make_op(arg, axes_node);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
}
TEST_P(AdaptiveMaxPoolV8CpuShapeInferenceTest , shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
AdaptiveMaxPoolV8CpuShapeInferenceTest ,
Values(make_tuple(unit_test::ShapeVector{{1, 3, 1, 2}, {2}}, std::vector<int32_t>{10, 20}, StaticShape({1, 3, 10, 20})),
make_tuple(unit_test::ShapeVector{{1, 2, 10}, {1}}, std::vector<int32_t>{17}, StaticShape({1, 2, 17}))),
AdaptiveMaxPoolV8CpuShapeInferenceTest::getTestCaseName);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,124 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "openvino/op/ops.hpp"
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
template <class T>
class CpuShapeInferenceTest_BEA : public testing::Test {};
// CpuShapeInferenceTest for BinaryElementwiseArithmetic (BEA) operations
TYPED_TEST_SUITE_P(CpuShapeInferenceTest_BEA);
TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_equal_rank) {
auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1, 1, 5}, StaticShape{3, 1, 6, 1}},
static_output_shapes = {StaticShape{3, 1, 6, 5}};
unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_a_rank_higher) {
auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1});
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 1, 5}, StaticShape{4, 6, 1}},
static_output_shapes = {StaticShape{3, 4, 6, 5}};
unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_b_rank_higher) {
auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1});
auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{4, 6, 1}, StaticShape{3, 4, 1, 5}},
static_output_shapes = {StaticShape{3, 4, 6, 5}};
unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_incompatible_shapes) {
auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{2, 4, 6, 5}},
static_output_shapes = {StaticShape{}};
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes),
ov::Exception,
testing::HasSubstr("Eltwise shape infer input shapes dim index:"));
}
TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_none) {
auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<TypeParam>(A, B, op::AutoBroadcastType::NONE);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 4, 6, 5}},
static_output_shapes = {StaticShape{3, 4, 6, 5}};
unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_none_incompatible_shapes) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<TypeParam>(A, B, op::AutoBroadcastType::NONE);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}},
static_output_shapes = {StaticShape{}};
// TODO: CVS-108946, the check below currently fails.
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes),
ov::Exception,
testing::HasSubstr("Eltwise shape infer input shapes dim index:"));
}
REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceTest_BEA,
shape_inference_autob_numpy_equal_rank,
shape_inference_autob_numpy_a_rank_higher,
shape_inference_autob_numpy_b_rank_higher,
shape_inference_autob_numpy_incompatible_shapes,
shape_inference_autob_none,
shape_inference_autob_none_incompatible_shapes);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_add, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Add>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_divide, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Divide>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_floor_mod, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::FloorMod>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_maximum, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Maximum>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_minimum, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Minimum>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_mod, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Mod>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_multiply, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Multiply>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_power, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Power>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_squared_difference, CpuShapeInferenceTest_BEA, ::testing::Types<op::v0::SquaredDifference>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_subtract, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Subtract>);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,110 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/op/ops.hpp"
#include "custom_shape_infer.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
template <class T>
class CpuShapeInferenceTest_ColorConvertNV12 : public testing::Test {};
// CpuShapeInferenceTest for ColorConvert NV12 operations
TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertNV12);
TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertNV12, singlePlane) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto op = std::make_shared<TypeParam>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 720, 640, 1}};
std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertNV12, multiPlane) {
auto dataY = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto dataUV = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto op = std::make_shared<TypeParam>(dataY, dataUV);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 480, 640, 1}, StaticShape{1, 240, 320, 2}};
std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertNV12, novalid_input) {
auto dataY = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto dataUV = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto op = std::make_shared<TypeParam>(dataY, dataUV);
std::vector<StaticShape> static_input_shapes = {StaticShape{480, 640, 1}, StaticShape{240, 320, 2}};
std::vector<StaticShape> static_output_shapes = {StaticShape{}};
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes),
ov::Exception,
testing::HasSubstr("NV12Converter node has incorrect input dimensions"));
}
REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertNV12,
singlePlane,
multiPlane,
novalid_input);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_NV12toBGR, CpuShapeInferenceTest_ColorConvertNV12, ::testing::Types<op::v8::NV12toBGR>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_NV12toRGB, CpuShapeInferenceTest_ColorConvertNV12, ::testing::Types<op::v8::NV12toRGB>);
template <class T>
class CpuShapeInferenceTest_ColorConvertI420 : public testing::Test {};
// CpuShapeInferenceTest for ColorConvert I420 operations
TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertI420);
TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertI420, singlePlane) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto op = std::make_shared<TypeParam>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 720, 640, 1}};
std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertI420, multiPlane) {
auto dataY = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto dataU = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto dataV = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto op = std::make_shared<TypeParam>(dataY, dataU, dataV);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 480, 640, 1}, StaticShape{1, 240, 320, 1}, StaticShape{1, 240, 320, 1}};
std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
}
TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertI420, novalid_input) {
auto dataY = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto dataU = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto dataV = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto op = std::make_shared<TypeParam>(dataY, dataU, dataV);
std::vector<StaticShape> static_input_shapes = {StaticShape{480, 640, 1}, StaticShape{240, 320, 1}, StaticShape{240, 320, 1}};
std::vector<StaticShape> static_output_shapes = {StaticShape{}};
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes),
ov::Exception,
testing::HasSubstr("NV12Converter node has incorrect input dimensions"));
}
REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertI420,
singlePlane,
multiPlane,
novalid_input);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_I420toBGR, CpuShapeInferenceTest_ColorConvertI420, ::testing::Types<op::v8::I420toBGR>);
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_I420toRGB, CpuShapeInferenceTest_ColorConvertI420, ::testing::Types<op::v8::I420toRGB>);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,146 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <openvino/cc/factory.h>
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/type.hpp"
#include "openvino/op/ops.hpp"
#include "openvino/op/parameter.hpp"
#include "shape_inference/custom/reshape.hpp"
#include "shape_inference/custom/gather.hpp"
#include "shape_inference/custom/transpose.hpp"
#include "shape_inference/custom/color_convert.hpp"
#include "shape_inference/custom/eltwise.hpp"
#include "shape_inference/custom/adaptive_pooling.hpp"
#include "shape_inference/custom/fullyconnected.hpp"
#include "shape_inference/custom/matmul.hpp"
#include "shape_inference/custom/ngram.hpp"
#include "shape_inference/custom/one_hot.hpp"
#include "shape_inference/custom/priorbox.hpp"
#include "shape_inference/custom/priorbox_clustered.hpp"
#include "shape_inference/custom/shapeof.hpp"
#include "shape_inference/custom/strided_slice.hpp"
#include "ie_ngraph_utils.hpp"
#include "custom_shape_infer.hpp"
#include "shape_inference/shape_inference_status.hpp"
#include <gtest/gtest.h>
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace {
#define INTEL_CPU_CUSTOM_SHAPE_INFER(__prim, __type) \
registerNodeIfRequired(intel_cpu, __prim, __type, __prim)
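// Test-only wrappers: adapt shape infer factories whose constructors take no arguments to the
// uniform (std::shared_ptr<ov::Node>) signature expected by the registration factory below.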
class EltwiseShapeInferTestFactory : public node::EltwiseShapeInferFactory {
public:
EltwiseShapeInferTestFactory(std::shared_ptr<ov::Node> op) : EltwiseShapeInferFactory() {}
};
class ShapeOfShapeInferTestFactory : public node::ShapeOfShapeInferFactory {
public:
ShapeOfShapeInferTestFactory(std::shared_ptr<ov::Node> op) : ShapeOfShapeInferFactory() {}
};
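// Maps a CPU plugin node Type to the ShapeInferFactory that provides its custom shape inference.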
class CustomShapeInferFF : public openvino::cc::Factory<Type, ShapeInferFactory*(const std::shared_ptr<ov::Node>& op)> {
public:
CustomShapeInferFF():Factory("CpuCustomShapeInferTestFactory") {
INTEL_CPU_CUSTOM_SHAPE_INFER(node::AdaptivePoolingShapeInferFactory, Type::AdaptivePooling);
INTEL_CPU_CUSTOM_SHAPE_INFER(EltwiseShapeInferTestFactory, Type::Eltwise);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::FCShapeInferFactory, Type::FullyConnected);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::TransposeShapeInferFactory, Type::Transpose);
INTEL_CPU_CUSTOM_SHAPE_INFER(ShapeOfShapeInferTestFactory, Type::ShapeOf);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::ColorConvertShapeInferFactory, Type::ColorConvert);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::ReshapeShapeInferFactory, Type::Reshape);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::MMShapeInferFactory, Type::MatMul);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::OneHotShapeInferFactory, Type::OneHot);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::StridedSliceShapeInferFactory, Type::StridedSlice);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::PriorBoxShapeInferFactory, Type::PriorBox);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::PriorBoxClusteredShapeInferFactory, Type::PriorBoxClustered);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::NgramShapeInferFactory, Type::Ngram);
INTEL_CPU_CUSTOM_SHAPE_INFER(node::GatherShapeInferFactory, Type::Gather);
#undef INTEL_CPU_CUSTOM_SHAPE_INFER
}
ShapeInferFactory* create(const std::shared_ptr<ov::Node>& op) {
ShapeInferFactory* newShapeInferFactory = nullptr;
std::unique_ptr<ShapeInferFactory> ol(createNodeIfRegistered(intel_cpu, TypeFromName(op->get_type_name()), op));
if (ol != nullptr) {
newShapeInferFactory = ol.release();
}
return newShapeInferFactory;
}
};
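// Compares the expected static shapes against the dims produced by the custom shape inference.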
void compare_result(const std::vector<StaticShape>& ref, const std::vector<VectorDims>& cus) {
ASSERT_EQ(ref.size(), cus.size());
for (size_t i = 0; i < ref.size(); i++) {
ASSERT_EQ(ref[i].size(), cus[i].size());
for (size_t y = 0; y < ref[i].size(); y++) {
ASSERT_EQ(ref[i][y].get_length(), cus[i][y]);
}
}
}
} //namespace
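// Runs the CPU plugin custom shape inference for the given op and compares the result with the
// expected output shapes. Static input shapes are converted to VectorDims; for the ports selected
// by the shape infer port mask, constant values (taken from constant_data or from Constant inputs
// of the op) are wrapped into CPU Memory objects so that data-dependent output shapes can be computed.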
void cpu_test_shape_infer(ov::Node* op,
const std::vector<StaticShape>& input_shapes,
std::vector<StaticShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data) {
static std::shared_ptr<CustomShapeInferFF> cusFactory = std::make_shared<CustomShapeInferFF>();
auto shapeInferFactory = cusFactory->create(op->shared_from_this());
ASSERT_TRUE(shapeInferFactory != nullptr);
auto cusShapeInfer = shapeInferFactory->makeShapeInfer();
std::vector<std::reference_wrapper<const VectorDims>> cusInputShapes;
std::vector<VectorDims> tmpInputShapes;
cusInputShapes.reserve(input_shapes.size());
tmpInputShapes.reserve(input_shapes.size());
for (size_t port = 0; port < input_shapes.size(); ++port) {
VectorDims dims;
for (size_t i = 0; i < input_shapes[port].size(); ++i) {
dims.emplace_back(input_shapes[port][i].get_length());
}
tmpInputShapes.emplace_back(dims);
cusInputShapes.emplace_back(std::ref(tmpInputShapes[port]));
}
std::unordered_map<size_t, MemoryPtr> cusInputValues;
auto input_value_port_mask = cusShapeInfer->get_port_mask();
dnnl::engine eng;
if (input_value_port_mask) {
for (size_t port = 0; port < input_shapes.size(); ++port) {
if (input_value_port_mask & (1 << port)) {
const auto tensorIter = constant_data.find(port);
const void* data = nullptr;
ov::element::Type elementType;
if (tensorIter != constant_data.end()) {
const auto tensor = tensorIter->second;
data = tensor->get_data_ptr();
elementType = tensor->get_element_type();
} else {
const auto input_op = op->input_value(port).get_node_shared_ptr();
const auto const_op = ov::as_type_ptr<const ov::op::v0::Constant>(input_op);
ASSERT_TRUE(const_op != nullptr);
data = const_op->get_data_ptr();
elementType = const_op->get_element_type();
}
CpuBlockedMemoryDesc desc(
InferenceEngine::details::convertPrecision(elementType),
ov::intel_cpu::Shape(tmpInputShapes[port]));
MemoryPtr memoryPtr = std::make_shared<Memory>(eng, desc, data, true);
cusInputValues[port] = memoryPtr;
}
}
}
auto result = cusShapeInfer->infer(cusInputShapes, cusInputValues);
compare_result(output_shapes, result.dims);
ASSERT_TRUE(result.status == ov::intel_cpu::ShapeInferStatus::success);
}
std::string boolToString(const bool value) {
return value ? "true" : "false";
}
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "cpu_types.h"
#include <shape_inference/shape_inference_cpu.hpp>
#include <shape_inference/static_shape.hpp>
#include <common_test_utils/common_utils.hpp>
#include <gtest/gtest.h>
#pragma once
namespace ov {
namespace intel_cpu {
namespace unit_test {
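// Runs CPU custom shape inference for 'op' on the given static input shapes and checks the result
// against output_shapes; constant_data supplies values for data-dependent input ports.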
void cpu_test_shape_infer(ov::Node* op,
const std::vector<StaticShape>& input_shapes,
std::vector<StaticShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {});
using ShapeVector = std::vector<ov::intel_cpu::StaticShape>;
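// Common typed test fixture: stores the input and expected shapes and provides make_op() to
// construct the operation under test.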
template <class TOp>
class OpCpuShapeInferenceTest : public testing::Test {
protected:
using op_type = TOp;
ShapeVector input_shapes, output_shapes;
ov::intel_cpu::StaticShape exp_shape;
std::shared_ptr<TOp> op;
template <class... Args>
std::shared_ptr<TOp> make_op(Args&&... args) {
return std::make_shared<TOp>(std::forward<Args>(args)...);
}
};
std::string boolToString(const bool value);
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,30 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
TEST(CpuShapeInfer, UnaryEltwiseTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<ov::op::v0::Relu>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}},
static_output_shapes = {StaticShape{3, 6, 5, 5}};
unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
}
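// A minimal additional sketch (not part of the original suite): the same helper can also be used
// for a binary eltwise op with numpy broadcasting. The shapes below are illustrative assumptions,
// and it assumes ov::op::v1::Add is dispatched to the Eltwise custom shape inference, as Relu is above.
TEST(CpuShapeInfer, BinaryEltwiseBroadcastTest) {
auto a = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto b = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<ov::op::v1::Add>(a, b);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}, StaticShape{1, 6, 1, 5}},
static_output_shapes = {StaticShape{3, 6, 5, 5}};
unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
}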
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,29 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "transformations/cpu_opset/common/op/fully_connected.hpp"
#include "custom_shape_infer.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
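// FullyConnectedNode is created with an explicit output rank of 5, so the 2D result {720, 5} is
// reported with the leading dimensions padded by 1: {1, 1, 1, 720, 5}.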
TEST(CpuShapeInfer, FC_InputSize_2) {
auto activate = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1 });
auto weight = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{5, 6});
auto op = std::make_shared<ov::intel_cpu::FullyConnectedNode>(activate, weight, ov::Rank(5), element::f32);
std::vector<StaticShape> static_input_shapes = {StaticShape{720, 640}, {5, 6}};
std::vector<StaticShape> static_output_shapes = {StaticShape{1, 1, 1, 720, 5}};
unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
}
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,86 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
using TestParams = std::tuple<int32_t, unit_test::ShapeVector, StaticShape>;
template <typename TGather>
class CpuShapeInferenceGatherTest : public unit_test::OpCpuShapeInferenceTest<TGather> {
protected:
void SetUp() override {
this->output_shapes.resize(0);
}
std::shared_ptr<TGather> make_gather(const unit_test::ShapeVector& shapes, const int32_t* const axis_val_ptr = nullptr) {
const auto p_dims = std::vector<Dimension>(shapes[0].size(), -1);
const auto i_dims = std::vector<Dimension>(shapes[1].size(), -1);
auto param = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{p_dims});
auto indices = std::make_shared<op::v0::Parameter>(element::i32, PartialShape{i_dims});
if (axis_val_ptr) {
auto axis = op::v0::Constant::create(element::i32, ov::Shape{}, {*axis_val_ptr});
return this->make_op(param, indices, axis);
} else {
auto axis = std::make_shared<op::v0::Parameter>(element::i32, PartialShape{});
return this->make_op(param, indices, axis);
}
}
int32_t axis_val;
};
// Parameters for the typed tests, iterated inside each test case: {axis value, {data, indices, axis} input shapes, expected output shape}.
const auto GatherTestParams =
std::vector<TestParams>{make_tuple(0, unit_test::ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({2, 2, 2})),
make_tuple(1, unit_test::ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})),
make_tuple(-1, unit_test::ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})),
make_tuple(0, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({2, 1, 2, 2, 4})),
make_tuple(1, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({3, 2, 1, 2, 4})),
make_tuple(-1, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {}}, StaticShape({3, 2, 2, 1, 2})),
make_tuple(-2, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {}}, StaticShape({3, 2, 1, 2, 4}))};
TYPED_TEST_SUITE_P(CpuShapeInferenceGatherTest);
TYPED_TEST_P(CpuShapeInferenceGatherTest, axis_const) {
for (auto&& params : GatherTestParams) {
std::tie(this->axis_val, this->input_shapes, this->exp_shape) = params;
auto op = this->make_gather(this->input_shapes, &this->axis_val);
this->output_shapes = {this->exp_shape};
unit_test::cpu_test_shape_infer(op.get(), this->input_shapes, this->output_shapes);
}
}
TYPED_TEST_P(CpuShapeInferenceGatherTest, axis_in_const_map) {
for (auto&& params : GatherTestParams) {
std::tie(this->axis_val, this->input_shapes, this->exp_shape) = params;
auto op = this->make_gather(this->input_shapes);
auto axis_tensor = std::make_shared<HostTensor>(element::i32, ov::Shape{1}, &this->axis_val);
this->output_shapes = {this->exp_shape};
unit_test::cpu_test_shape_infer(op.get(), this->input_shapes, this->output_shapes, {{2, axis_tensor}});
}
}
REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceGatherTest, axis_const, axis_in_const_map);
using GatherTypes = Types<op::v1::Gather, op::v7::Gather, op::v8::Gather>;
INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer, CpuShapeInferenceGatherTest, GatherTypes);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,148 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
using matmul_test_params_t = std::tuple<StaticShape, // Input A shape
StaticShape // Input B shape
>;
class CPUMatMulTest : public TestWithParam<matmul_test_params_t> {
public:
static std::string getTestCaseName(const testing::TestParamInfo<matmul_test_params_t>& obj) {
StaticShape tmp_input_shape_A;
StaticShape tmp_input_shape_B;
std::tie(tmp_input_shape_A, tmp_input_shape_B) = obj.param;
std::ostringstream result;
result << "IA" << tmp_input_shape_A << "_";
result << "IB" << tmp_input_shape_B;
return result.str();
}
protected:
void SetUp() override {
std::tie(a_shape, b_shape) = GetParam();
set_exp_shape();
output_shapes.clear();
output_shapes.push_back(exp_shape);
}
std::shared_ptr<op::v0::MatMul> make_matmul(const size_t& a_dim_count,
const size_t& b_dim_count,
const bool transpose_a,
const bool transpose_b) {
auto a_input = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic(a_dim_count));
auto b_input = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic(b_dim_count));
return std::make_shared<op::v0::MatMul>(a_input, b_input, transpose_a, transpose_b);
}
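// Expected shape: batch dims are the element-wise maximum (numpy broadcast) of the inputs' batch
// dims, followed by [rows of A, cols of B]; a 1-D input drops its corresponding output dim.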
void set_exp_shape() {
if (a_shape.size() > 1 && b_shape.size() > 1) {
std::transform(a_shape.cbegin(),
a_shape.cend() - 2,
b_shape.cbegin(),
std::back_inserter(exp_shape),
[](const StaticDimension& a, const StaticDimension& b) {
return std::max(a.get_length(), b.get_length());
});
exp_shape.push_back(*std::next((*a_shape).rbegin()));
exp_shape.push_back((*b_shape).back());
} else if (a_shape.size() == 1 && b_shape.size() > 1) {
exp_shape = b_shape;
(*exp_shape).erase(std::prev((*exp_shape).end(), 2));
} else if (b_shape.size() == 1 && a_shape.size() > 1) {
exp_shape = a_shape;
(*exp_shape).erase(std::prev((*exp_shape).end()));
}
}
static StaticShape make_transpose_input(const StaticShape& in) {
StaticShape out(in);
if (out.size() > 1) {
std::iter_swap((*out).rbegin(), std::next((*out).rbegin()));
}
return out;
}
StaticShape a_shape, b_shape, exp_shape;
unit_test::ShapeVector output_shapes;
};
TEST_P(CPUMatMulTest, no_input_transpose) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, false);
std::vector<StaticShape> static_input_shapes = {a_shape, b_shape};
// TODO: CVS-108946, the test case below cannot pass yet
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}
TEST_P(CPUMatMulTest, transpose_input_a) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, false);
const auto a_transpose = make_transpose_input(a_shape);
std::vector<StaticShape> static_input_shapes = {a_transpose, b_shape};
// TODO: CVS-108946, the test case below cannot pass yet
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}
TEST_P(CPUMatMulTest, transpose_input_b) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, true);
const auto b_transpose = make_transpose_input(b_shape);
std::vector<StaticShape> static_input_shapes = {a_shape, b_transpose};
// TODO: CVS-108946, the test case below cannot pass yet
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}
TEST_P(CPUMatMulTest, transpose_inputs_a_b) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, true);
const auto a_transpose = make_transpose_input(a_shape);
const auto b_transpose = make_transpose_input(b_shape);
std::vector<StaticShape> static_input_shapes = {a_transpose, b_transpose};
// TODO: CVS-108946, the test case below cannot pass yet
unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
}
/** \brief MatMul input shape combinations: 1-D, 2-D and batched inputs with broadcastable batch dimensions. */
INSTANTIATE_TEST_SUITE_P(CpuShapeInfer,
CPUMatMulTest,
Values(make_tuple(StaticShape({1}), StaticShape({1})),
make_tuple(StaticShape({1}), StaticShape({1, 3})),
make_tuple(StaticShape({1}), StaticShape({1, 1, 3})),
make_tuple(StaticShape({3, 1}), StaticShape({1})),
make_tuple(StaticShape({3, 2, 1}), StaticShape({1})),
make_tuple(StaticShape({3}), StaticShape({3})),
make_tuple(StaticShape({5, 2}), StaticShape({2, 6})),
make_tuple(StaticShape({2, 1, 2}), StaticShape({2, 6})),
make_tuple(StaticShape({10, 8, 9, 2}), StaticShape({10, 8, 2, 8})),
make_tuple(StaticShape({3, 1, 4, 3, 4}), StaticShape({3, 2, 1, 4, 1}))),
CPUMatMulTest::getTestCaseName);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,28 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "transformations/cpu_opset/common/op/ngram.hpp"
#include "custom_shape_infer.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
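// NgramNode with k = 3 triples the embedding dimension: {720, 640} -> {720, 1920}.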
TEST(CpuShapeInfer, Ngram) {
auto embeddings = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1});
auto idces = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape{-1, -1});
auto op = std::make_shared<ov::intel_cpu::NgramNode>(embeddings, idces, 3);
std::vector<StaticShape> static_input_shapes = {StaticShape{720, 640}, {5, 6}};
std::vector<StaticShape> static_output_shapes = {StaticShape{720, 640 * 3}};
unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
}
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,129 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
using OneHotTestParams = std::tuple<unit_test::ShapeVector, // Input shapes
int64_t, // depth
int32_t, // on_value
int32_t, // off_value
StaticShape // Expected shape
>;
class OneHotCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v1::OneHot>,
public WithParamInterface<OneHotTestParams> {
public:
static std::string getTestCaseName(const testing::TestParamInfo<OneHotTestParams>& obj) {
unit_test::ShapeVector tmp_input_shapes;
int64_t tmp_depth;
int32_t tmp_on;
int32_t tmp_off;
StaticShape tmp_exp_shape;
std::tie(tmp_input_shapes, tmp_depth, tmp_on, tmp_off, tmp_exp_shape) = obj.param;
std::ostringstream result;
result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
result << "depth" << tmp_depth << "_";
result << "on" << tmp_on << "_";
result << "off" << tmp_off << "_";
result << "exp_shape" << tmp_exp_shape;
return result.str();
}
protected:
void SetUp() override {
std::tie(input_shapes, m_depth, m_on, m_off, exp_shape) = GetParam();
output_shapes = unit_test::ShapeVector(0);
output_shapes.push_back(exp_shape);
arg = std::make_shared<op::v0::Parameter>(element::i64, input_shapes.front().get_shape());
}
int64_t m_depth;
int32_t m_on;
int32_t m_off;
std::shared_ptr<op::v0::Parameter> arg;
};
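// OneHot inserts the depth dimension at the requested axis: indices {3} with depth 2 and axis -1
// produce {3, 2}.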
TEST_P(OneHotCpuShapeInferenceTest , shape_inference_empty_const_map) {
const auto depth = op::v0::Constant::create(element::i64, ov::Shape{}, {m_depth});
const auto on_value = op::v0::Constant::create(element::i32, ov::Shape{}, {m_on});
const auto off_value = op::v0::Constant::create(element::i32, ov::Shape{}, {m_off});
int64_t axis = -1;
const auto op = make_op(arg, depth, on_value, off_value, axis);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
}
TEST_P(OneHotCpuShapeInferenceTest , shape_inference_with_const_map) {
const auto depth = std::make_shared<op::v0::Parameter>(element::i64, ov::Shape{});
const auto on = std::make_shared<op::v0::Parameter>(element::i32, ov::Shape{});
const auto off = std::make_shared<op::v0::Parameter>(element::i32, ov::Shape{});
int64_t axis = -1;
const auto op = make_op(arg, depth, on, off, axis);
const auto depth_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{}, std::vector<int64_t>{m_depth});
const auto on_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_on});
const auto off_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_off});
const auto depth_tensor = std::make_shared<ov::HostTensor>(depth_const);
const auto on_tensor = std::make_shared<ov::HostTensor>(on_const);
const auto off_tensor = std::make_shared<ov::HostTensor>(off_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, depth_tensor},
{2, on_tensor},
{3, off_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
OneHotCpuShapeInferenceTest ,
Values(make_tuple(unit_test::ShapeVector{{3}, {}, {}, {}}, 2, 5, 10, StaticShape({3, 2})),
make_tuple(unit_test::ShapeVector{{3}, {}, {}, {}}, 2, 1, 0, StaticShape({3, 2}))),
OneHotCpuShapeInferenceTest::getTestCaseName);
using OneHotCpuShapeInferenceThrowExceptionTest = OneHotCpuShapeInferenceTest;
TEST_P(OneHotCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
const auto depth = std::make_shared<op::v0::Parameter>(element::i64, ov::Shape{});
const auto on = std::make_shared<op::v0::Parameter>(element::i32, ov::Shape{});
const auto off = std::make_shared<op::v0::Parameter>(element::i32, ov::Shape{});
int64_t axis = -1;
const auto op = make_op(arg, depth, on, off, axis);
const auto depth_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{}, std::vector<int64_t>{m_depth});
const auto on_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_on});
const auto off_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_off});
const auto depth_tensor = std::make_shared<ov::HostTensor>(depth_const);
const auto on_tensor = std::make_shared<ov::HostTensor>(on_const);
const auto off_tensor = std::make_shared<ov::HostTensor>(off_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, depth_tensor},
{2, on_tensor},
{3, off_tensor}};
// TODO: the implementation should throw an exception here
ASSERT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data),
ov::Exception);
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
OneHotCpuShapeInferenceThrowExceptionTest,
Values(make_tuple(unit_test::ShapeVector{{3}, {}, {}, {}}, -2, 1, 0, StaticShape({}))),
OneHotCpuShapeInferenceThrowExceptionTest::getTestCaseName);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,209 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include <ngraph/opsets/opset8.hpp>
#include <vector>
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// TODO: should also cover v8::PriorBox
using PriorBoxV0TestParams = std::tuple<unit_test::ShapeVector, // Input shapes
op::v0::PriorBox::Attributes,
std::vector<std::vector<int32_t>>, // layer_data, image_data
StaticShape // Expected shape
>;
class PriorBoxV0CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<ov::op::v0::PriorBox>,
public WithParamInterface<PriorBoxV0TestParams> {
public:
static std::string getTestCaseName(const testing::TestParamInfo<PriorBoxV0TestParams>& obj) {
unit_test::ShapeVector tmp_input_shapes;
op::v0::PriorBox::Attributes tmp_attrs;
std::vector<std::vector<int32_t>> tmp_data;
StaticShape tmp_exp_shape;
std::tie(tmp_input_shapes, tmp_attrs, tmp_data, tmp_exp_shape) = obj.param;
std::ostringstream result;
result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
result << "min_size" << CommonTestUtils::vec2str(tmp_attrs.min_size) << "_";
result << "max_size" << CommonTestUtils::vec2str(tmp_attrs.max_size) << "_";
result << "density" << CommonTestUtils::vec2str(tmp_attrs.density) << "_";
result << "fixed_ratio" << CommonTestUtils::vec2str(tmp_attrs.fixed_ratio) << "_";
result << "fixed_size" << CommonTestUtils::vec2str(tmp_attrs.fixed_size) << "_";
result << "clip(" << unit_test::boolToString(tmp_attrs.clip) << ")_";
result << "flip(" << unit_test::boolToString(tmp_attrs.flip) << ")_";
result << "step(" << tmp_attrs.step << ")_";
result << "offset(" << tmp_attrs.offset << ")_";
result << "variance" << CommonTestUtils::vec2str(tmp_attrs.variance) << "_";
result << "scale_all_sizes(" << unit_test::boolToString(tmp_attrs.scale_all_sizes) << ")_";
result << "exp_shape(" << tmp_exp_shape << ")";
return result.str();
}
protected:
void SetUp() override {
std::tie(input_shapes, attrs, data, exp_shape) = GetParam();
output_shapes = unit_test::ShapeVector(0);
output_shapes.push_back(exp_shape);
ASSERT_EQ(input_shapes.size(), 2);
ASSERT_EQ(data.size(), 2);
}
op::v0::PriorBox::Attributes attrs;
std::vector<std::vector<int32_t>> data;
};
namespace prior_box {
const op::v0::PriorBox::Attributes createAttrs(
std::vector<float> min_size,
std::vector<float> max_size,
std::vector<float> aspect_ratio,
std::vector<float> density,
std::vector<float> fixed_ratio,
std::vector<float> fixed_size,
bool clip,
bool flip,
float step,
float offset,
std::vector<float> variance,
bool scale_all_sizes);
const op::v0::PriorBox::Attributes createAttrs(
std::vector<float> min_size,
std::vector<float> max_size,
std::vector<float> aspect_ratio,
std::vector<float> density,
std::vector<float> fixed_ratio,
std::vector<float> fixed_size,
bool clip,
bool flip,
float step,
float offset,
std::vector<float> variance,
bool scale_all_sizes) {
op::v0::PriorBox::Attributes attrs;
attrs.min_size = min_size;
attrs.max_size = max_size;
attrs.aspect_ratio = aspect_ratio;
attrs.density = density;
attrs.fixed_ratio = fixed_ratio;
attrs.fixed_size = fixed_size;
attrs.clip = clip;
attrs.flip = flip;
attrs.step = step;
attrs.offset = offset;
attrs.variance = variance;
attrs.scale_all_sizes = scale_all_sizes;
return attrs;
}
const op::v0::PriorBox::Attributes attrs1 = createAttrs(
{16.0f}, // min_size Desired min_size of prior boxes
{38.46f}, // max_size Desired max_size of prior boxes
{2.0f}, // aspect_ratio Aspect ratios of prior boxes
{}, // density
{}, // fixed_ratio
{}, // fixed_size
false, // clip Clip output to [0, 1]
true, // flip Flip aspect ratios
16.0f, // step Distance between prior box centers
0.5f, // offset Box offset relative to top center of image
{0.1f, 0.1f, 0.2f, 0.2f}, // variance Values to adjust prior boxes with
true // scale_all_sizes Scale all sizes
);
const op::v0::PriorBox::Attributes attrs2 = createAttrs(
{2.0f, 3.0f}, // min_size Desired min_size of prior boxes
{}, // max_size Desired max_size of prior boxes
{1.5f, 2.0f, 2.5f}, // aspect_ratio Aspect ratios of prior boxes
{}, // density
{}, // fixed_ratio
{}, // fixed_size
false, // clip Clip output to [0, 1]
false, // flip Flip aspect ratios
0.0f, // step Distance between prior box centers
0.0f, // offset Box offset relative to top center of image
{}, // variance Values to adjust prior boxes with
false // scale_all_sizes Scale all sizes
);
const op::v0::PriorBox::Attributes attrs3 = createAttrs(
{2.0f, 3.0f}, // min_size Desired min_size of prior boxes
{}, // max_size Desired max_size of prior boxes
{1.5f, 2.0f, 2.5f}, // aspect_ratio Aspect ratios of prior boxes
{}, // density
{}, // fixed_ratio
{}, // fixed_size
false, // clip Clip output to [0, 1]
true, // flip Flip aspect ratios
0.0f, // step Distance between prior box centers
0.0f, // offset Box offset relative to top center of image
{}, // variance Values to adjust prior boxes with
false // scale_all_sizes Scale all sizes
);
const op::v0::PriorBox::Attributes attrs4 = createAttrs(
{256.0f}, // min_size Desired min_size of prior boxes
{315.0f}, // max_size Desired max_size of prior boxes
{2.0f}, // aspect_ratio Aspect ratios of prior boxes
{}, // density
{}, // fixed_ratio
{}, // fixed_size
false, // clip Clip output to [0, 1]
true, // flip Flip aspect ratios
0.0f, // step Distance between prior box centers
0.0f, // offset Box offset relative to top center of image
{}, // variance Values to adjust prior boxes with
true // scale_all_sizes Scale all sizes
);
} // namespace prior_box
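// The expected output is {2, 4 * H * W * priors_per_location}; e.g. attrs1 yields 4 priors per
// location, so a 24x42 layer gives 4 * 24 * 42 * 4 = 16128.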
TEST_P(PriorBoxV0CpuShapeInferenceTest , shape_inference_empty_const_map) {
const auto layer_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[0]);
const auto image_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[1]);
auto op = make_op(layer_const, image_const, attrs);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
}
TEST_P(PriorBoxV0CpuShapeInferenceTest , shape_inference_with_const_map) {
const auto layer_shape = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
const auto image_shape = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto op = make_op(layer_shape, image_shape, attrs);
const auto layer_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[0]);
const auto image_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[1]);
const std::map<size_t, HostTensorPtr> const_data {
{0, std::make_shared<HostTensor>(layer_const)},
{1, std::make_shared<HostTensor>(image_const)},
};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, const_data);
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
PriorBoxV0CpuShapeInferenceTest ,
Values(make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs1,
std::vector<std::vector<int32_t>>{{24, 42}, {384, 672}}, StaticShape({2, 16128})),
make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs2,
std::vector<std::vector<int32_t>>{{32, 32}, {384, 672}}, StaticShape({2, 20480})),
make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs3,
std::vector<std::vector<int32_t>>{{32, 32}, {300, 300}}, StaticShape({2, 32768})),
make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs4,
std::vector<std::vector<int32_t>>{{1, 1}, {300, 300}}, StaticShape({2, 16}))),
PriorBoxV0CpuShapeInferenceTest::getTestCaseName);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,174 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/prior_box_clustered.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
using PriorBoxClusteredV0TestParams = std::tuple<unit_test::ShapeVector, // Input shapes
op::v0::PriorBoxClustered::Attributes,
std::vector<std::vector<int32_t>>, // layer_data, image_data
StaticShape // Expected shape
>;
class PriorBoxClusteredV0CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v0::PriorBoxClustered>,
public WithParamInterface<PriorBoxClusteredV0TestParams> {
public:
static std::string getTestCaseName(const testing::TestParamInfo<PriorBoxClusteredV0TestParams>& obj) {
unit_test::ShapeVector tmp_input_shapes;
op::v0::PriorBoxClustered::Attributes tmp_attrs;
std::vector<std::vector<int32_t>> tmp_data;
StaticShape tmp_exp_shape;
std::tie(tmp_input_shapes, tmp_attrs, tmp_data, tmp_exp_shape) = obj.param;
std::ostringstream result;
result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
result << "widths" << CommonTestUtils::vec2str(tmp_attrs.widths) << "_";
result << "heights" << CommonTestUtils::vec2str(tmp_attrs.heights) << "_";
result << "clip(" << unit_test::boolToString(tmp_attrs.clip) << ")_";
result << "step_widths(" << tmp_attrs.step_widths<< ")_";
result << "step_heights(" << tmp_attrs.step_heights << ")_";
result << "offset(" << tmp_attrs.offset << ")_";
result << "variances" << CommonTestUtils::vec2str(tmp_attrs.variances) << "_";
result << "exp_shape(" << tmp_exp_shape << ")";
return result.str();
}
protected:
void SetUp() override {
std::tie(input_shapes, attrs, data, exp_shape) = GetParam();
output_shapes = unit_test::ShapeVector(0);
output_shapes.push_back(exp_shape);
ASSERT_LE(input_shapes.size(), 2);
ASSERT_LE(data.size(), 2);
}
op::v0::PriorBoxClustered::Attributes attrs;
std::vector<std::vector<int32_t>> data;
};
namespace prior_box_cluster {
const op::v0::PriorBoxClustered::Attributes createAttrs(
std::vector<float> widths,
std::vector<float> heights,
bool clip,
float step_widths,
float step_heights,
float step,
float offset,
std::vector<float> variances);
const op::v0::PriorBoxClustered::Attributes createAttrs(
std::vector<float> widths,
std::vector<float> heights,
bool clip,
float step_widths,
float step_heights,
float step,
float offset,
std::vector<float> variances) {
op::v0::PriorBoxClustered::Attributes attrs;
attrs.widths = widths;
attrs.heights = heights;
attrs.clip = clip;
attrs.step_widths = step_widths;
attrs.step_heights = step_heights;
attrs.offset = offset;
attrs.variances = variances;
return attrs;
}
const op::v0::PriorBoxClustered::Attributes attrs1 = createAttrs(
{2.0f, 3.0f} , // widths Desired widths of prior boxes
{1.5f, 2.0f}, // heights Desired heights of prior boxes
true, // clip Clip output to [0, 1]
0.0f, // step_widths Distance between prior box centers
0.0f, // step_heights Distance between prior box centers
0.0f, // step Distance between prior box centers (when step_w = step_h)
0.0f, // offset Box offset relative to top center of image
{} // variances Values to adjust prior boxes with
);
const op::v0::PriorBoxClustered::Attributes attrs2 = createAttrs(
{86.0f, 13.0f, 57.0f, 39.0f, 68.0f, 34.0f, 142.0f, 50.0f, 23.0f}, // widths Desired widths of prior boxes
{44.0f, 10.0f, 30.0f, 19.0f, 94.0f, 32.0f, 61.0f, 53.0f, 17.0f}, // heights Desired heights of prior boxes
false, // clip Clip output to [0, 1]
0.0f, // step_widths Distance between prior box centers
0.0f, // step_heights Distance between prior box centers
16.0f, // step Distance between prior box centers (when step_w = step_h)
0.5f, // offset Box offset relative to top center of image
{0.1f, 0.1f, 0.2f, 0.2f} // variances Values to adjust prior boxes with
);
const op::v0::PriorBoxClustered::Attributes attrs3 = createAttrs(
{4.0f, 2.0f, 3.2f} , // widths Desired widths of prior boxes
{1.0f, 2.0f, 1.1f}, // heights Desired heights of prior boxes
true, // clip Clip output to [0, 1]
0.0f, // step_widths Distance between prior box centers
0.0f, // step_heights Distance between prior box centers
0.0f, // step Distance between prior box centers (when step_w = step_h)
0.0f, // offset Box offset relative to top center of image
{} // variances Values to adjust prior boxes with
);
} // namespace prior_box_cluster
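// The expected output is {2, 4 * H * W * number_of_widths}; e.g. attrs1 (2 widths) on a 2x5 layer
// gives 4 * 2 * 5 * 2 = 80.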
TEST_P(PriorBoxClusteredV0CpuShapeInferenceTest , shape_inference_empty_const_map) {
const auto layer_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[0]);
std::shared_ptr<op::v0::PriorBoxClustered> op;
if (input_shapes.size() == 2) {
const auto image_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[1]);
op = make_op(layer_const, image_const, attrs);
} else {
const auto image_param = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
op = make_op(layer_const, image_param, attrs);
}
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
}
TEST_P(PriorBoxClusteredV0CpuShapeInferenceTest , shape_inference_with_const_map) {
const auto layer_shape = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
const auto image_shape = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto op = make_op(layer_shape, image_shape, attrs);
const auto layer_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[0]);
std::map<size_t, HostTensorPtr> const_data{{0, std::make_shared<HostTensor>(layer_const)}};
if (input_shapes.size() == 2) {
const auto image_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[1]);
const_data.insert({1, std::make_shared<HostTensor>(image_const)});
}
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, const_data);
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
PriorBoxClusteredV0CpuShapeInferenceTest ,
Values(make_tuple(unit_test::ShapeVector{{2}}, prior_box_cluster::attrs1,
std::vector<std::vector<int32_t>>{{2, 5}}, StaticShape({2, 80})),
make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box_cluster::attrs1,
std::vector<std::vector<int32_t>>{{12, 16}, {50, 50}}, StaticShape({2, 1536})),
make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box_cluster::attrs2,
std::vector<std::vector<int32_t>>{{10, 19}, {180, 300}}, StaticShape({2, 6840})),
make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box_cluster::attrs3,
std::vector<std::vector<int32_t>>{{19, 19}, {300, 300}}, StaticShape({2, 4332}))),
PriorBoxClusteredV0CpuShapeInferenceTest::getTestCaseName);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,137 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ostream>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
using ReshapeTestParams = std::tuple<unit_test::ShapeVector, // Input shapes
std::vector<int64_t>, // reshape axes
StaticShape, // Expected shape
bool // special zero
>;
class ReshapeCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v1::Reshape>,
public WithParamInterface<ReshapeTestParams> {
public:
static std::string getTestCaseName(const testing::TestParamInfo<ReshapeTestParams>& obj) {
unit_test::ShapeVector tmp_input_shapes;
std::vector<int64_t> tmp_axes;
StaticShape tmp_exp_shape;
bool tmp_specialZero;
std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape, tmp_specialZero) = obj.param;
std::ostringstream result;
result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
result << "axes" << CommonTestUtils::vec2str(tmp_axes) << "_";
result << "exp_shape(" << tmp_exp_shape << ")_";
result << "specalZero(" << unit_test::boolToString(tmp_specialZero) << ")";
return result.str();
}
protected:
void SetUp() override {
std::tie(input_shapes, axes, exp_shape, specialZero) = GetParam();
output_shapes = unit_test::ShapeVector(0);
arg = std::make_shared<op::v0::Parameter>(element::f32, input_shapes.front().get_shape());
}
std::vector<int64_t> axes;
std::shared_ptr<op::v0::Parameter> arg;
bool specialZero;
};
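// Reshape semantics covered below: -1 infers the remaining dimension from the element count, and
// with special_zero enabled a 0 in the pattern copies the corresponding input dimension.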
TEST_P(ReshapeCpuShapeInferenceTest , shape_inference_empty_const_map) {
const auto axes_node = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto op = make_op(arg, axes_node, specialZero);
output_shapes.push_back(exp_shape);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
}
TEST_P(ReshapeCpuShapeInferenceTest , shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
const auto op = make_op(arg, axes_node, specialZero);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
output_shapes.push_back(exp_shape);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
ReshapeCpuShapeInferenceTest ,
Values(make_tuple(unit_test::ShapeVector{{1, 2, 3, 1}, {2}}, std::vector<int64_t>{0, -1}, StaticShape({1, 6}), true),
make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector<int64_t>{1, 2, 3, 8}, StaticShape({1, 2, 3, 8}), true),
make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector<int64_t>{0, 2, 0, 8}, StaticShape({1, 2, 3, 8}), true),
make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector<int64_t>{0, -1}, StaticShape({0, 4}), true),
make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector<int64_t>{0, 4}, StaticShape({0, 4}), true),
make_tuple(unit_test::ShapeVector{{4, 0, 2}, {2}}, std::vector<int64_t>{-1, 0}, StaticShape({8, 0}), true),
make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector<int64_t>{1, 2, 3, 8}, StaticShape({1, 2, 3, 8}), false),
make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector<int64_t>{3, 0}, StaticShape({3, 0}), false),
make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector<int64_t>{4, 0}, StaticShape({4, 0}), false),
make_tuple(unit_test::ShapeVector{{3, 6, 5, 5}, {2}}, std::vector<int64_t>{0, -1}, StaticShape({3, 150}), true)),
ReshapeCpuShapeInferenceTest::getTestCaseName);
using ReshapeCpuShapeInferenceThrowExceptionTest = ReshapeCpuShapeInferenceTest;
TEST_P(ReshapeCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
const auto op = make_op(arg, axes_node, specialZero);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
std::ostringstream os;
os << "[cpu]reshape: the shape of input data ";
os << "(";
for (size_t i = 0; i < input_shapes[0].size(); i++) {
os << input_shapes[0][i];
if (i < input_shapes[0].size() - 1) {
os << ".";
}
}
os << ")";
os << " conflicts with the reshape pattern ";
os << "(";
for (size_t i = 0; i < axes.size(); i++) {
os << axes[i];
if (i < axes.size() - 1) {
os << ".";
}
}
os << ")";
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data),
ov::Exception,
HasSubstr(os.str()));
}
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
ReshapeCpuShapeInferenceThrowExceptionTest,
Values(make_tuple(unit_test::ShapeVector{{1, 2, 3, 1}, {3}}, std::vector<int64_t>{0, -1, -1}, StaticShape({}), true),
make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector<int64_t>{1, 2, 3, 6}, StaticShape({}), true),
make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector<int64_t>{0, 3, 0, 8}, StaticShape({}), true),
make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector<int64_t>{1, 2, 3, 6}, StaticShape({}), false),
make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector<int64_t>{3, 3}, StaticShape({}), false)),
ReshapeCpuShapeInferenceThrowExceptionTest::getTestCaseName);
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -0,0 +1,59 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ostream>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include "openvino/op/ops.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
namespace cpu_shape_infer {
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
TEST(CpuShapeInfer, ShapeOf5DTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
auto shapeof =
std::make_shared<op::v0::ShapeOf>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{2, 3, 4, 5, 6}},
static_output_shapes = {StaticShape{5}};
unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes);
}
TEST(CpuShapeInfer, v3ShapeOf5DTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
auto shapeof =
std::make_shared<op::v3::ShapeOf>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{2, 3, 4, 5, 6}},
static_output_shapes = {StaticShape{5}};
unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes);
}
TEST(CpuShapeInfer, ShapeOf0DTest) {
GTEST_SKIP() << "Skipping test, please check CVS-108946";
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{});
auto shapeof =
std::make_shared<op::v3::ShapeOf>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{}},
static_output_shapes = {StaticShape{}};
// TODO: cannot pass yet, the implementation does not support 0D shape input
unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes);
}
} // namespace cpu_shape_infer
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

Some files were not shown because too many files have changed in this diff