diff --git a/src/plugins/intel_cpu/src/cpu_shape.h b/src/plugins/intel_cpu/src/cpu_shape.h index 62d624bce8d..9f4f4ee929a 100644 --- a/src/plugins/intel_cpu/src/cpu_shape.h +++ b/src/plugins/intel_cpu/src/cpu_shape.h @@ -44,7 +44,7 @@ public: Shape(const VectorDims& minDims, const VectorDims& maxDims) { if (minDims.size() != maxDims.size()) { - IE_THROW() << "Can't create shape due to min/max vectors dims size mismatch"; + OPENVINO_THROW("Can't create shape due to min/max vectors dims size mismatch"); } this->minDims = minDims; this->maxDims = maxDims; @@ -114,7 +114,7 @@ public: */ const VectorDims& getStaticDims() const { if (type != ShapeType::Static) { - IE_THROW() << "Cannot get dims for non static shape"; + OPENVINO_THROW("Cannot get dims for non static shape"); } return minDims; @@ -155,7 +155,7 @@ public: size_t getElementsCount() const { if (type != ShapeType::Static) { - IE_THROW() << "Cannot get elements count for non static shape"; + OPENVINO_THROW("Cannot get elements count for non static shape"); } size_t size = 1; diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index 4cfe9c7d708..cffa7405077 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -32,7 +32,7 @@ #include "nodes/node_config.h" #include "cache/multi_cache.h" -#include +#include #include "utils/debug_capabilities.h" #include "utils/bit_util.hpp" diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp index 30bbc45a009..0feec97c849 100644 --- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp @@ -14,6 +14,7 @@ #include #include #include +#include "shape_inference/custom/adaptive_pooling.hpp" using namespace InferenceEngine; using namespace dnnl; @@ -23,57 +24,6 @@ namespace ov { namespace intel_cpu { namespace node { -namespace { -/** - * Implements Adaptive Pooling shape inference algorithm. The output tensor shape consists of the input [N, C] dimensions and - * the [D_out, H_out, W_out] dimensions, which are placed in the second input parameter. 
- * - */ -class AdaptivePoolingShapeInfer : public ShapeInferEmptyPads { -public: - explicit AdaptivePoolingShapeInfer(size_t outputs_count) : m_outputs_count(outputs_count) {} - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - const auto& inputDims = input_shapes[0].get(); - const auto& spatialDims = input_shapes[1].get(); - const auto inputRank = inputDims.size(); - const auto spatialDimsSize = spatialDims[0]; - - VectorDims outputDims(inputRank); - outputDims[0] = inputDims[0]; - outputDims[1] = inputDims[1]; - auto newSpatialDimsPtr = reinterpret_cast(data_dependency.at(1)->getData()); - for (size_t i = 0; i < spatialDimsSize; i++) { - outputDims[i + 2] = newSpatialDimsPtr[i]; - } - - std::vector result(m_outputs_count, outputDims); - return {std::move(result), ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return PortMask(1); - } - -private: - size_t m_outputs_count; -}; - -class AdaptivePoolingShapeInferFactory : public ShapeInferFactory { -public: - AdaptivePoolingShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - size_t outputs_count = m_op->get_output_size(); - return std::make_shared(outputs_count); - } - -private: - std::shared_ptr m_op; -}; - -} // namespace - bool AdaptivePooling::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { if (one_of(op->get_type_info(), ngraph::op::v8::AdaptiveAvgPool::get_type_info_static())) { diff --git a/src/plugins/intel_cpu/src/nodes/bucketize.cpp b/src/plugins/intel_cpu/src/nodes/bucketize.cpp index 2073b82e0ff..ec7b94c32d3 100644 --- a/src/plugins/intel_cpu/src/nodes/bucketize.cpp +++ b/src/plugins/intel_cpu/src/nodes/bucketize.cpp @@ -7,7 +7,7 @@ #include #include -#include +#include #include "ie_parallel.hpp" #include "bucketize.h" diff --git a/src/plugins/intel_cpu/src/nodes/color_convert.cpp b/src/plugins/intel_cpu/src/nodes/color_convert.cpp index 951db703a96..9d302b0e5c2 100644 --- a/src/plugins/intel_cpu/src/nodes/color_convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/color_convert.cpp @@ -11,6 +11,7 @@ #include #include #include "kernels/x64/jit_kernel.hpp" +#include "shape_inference/custom/color_convert.hpp" using namespace InferenceEngine; using namespace dnnl::impl; @@ -971,45 +972,6 @@ public: #endif } // namespace i420 -/** - * Implements Color Convert shape inference algorithm. Depending on wether it has only single plain H dimension is - * passed through or recalculated as 2/3 of the initial size. - * - */ -class ColorConvertShapeInfer : public ShapeInferEmptyPads { -public: - ColorConvertShapeInfer(bool singlePlain) : m_singlePlain(singlePlain) {} - Result infer(const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - const auto& dims = input_shapes.front().get(); - if (dims.size() != 4) - IE_THROW() <<"NV12Converter node has incorrect input dimensions"; - return { m_singlePlain - ? 
std::vector{ { dims[Converter::N_DIM], dims[Converter::H_DIM] * 2 / 3, dims[Converter::W_DIM], 3 } } - : std::vector{ { dims[Converter::N_DIM], dims[Converter::H_DIM], dims[Converter::W_DIM], 3 } }, - ShapeInferStatus::success }; - } - - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } - -private: - bool m_singlePlain = false; -}; - -class ColorConvertShapeInferFactory : public ShapeInferFactory { -public: - ColorConvertShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - bool isSinglePlain = m_op->get_input_size() == 1; - return std::make_shared(isSinglePlain); - } - -private: - std::shared_ptr m_op; -}; - } // namespace ColorConvert::Converter::Converter(Node *node, const ColorFormat & colorFormat) diff --git a/src/plugins/intel_cpu/src/nodes/convert.cpp b/src/plugins/intel_cpu/src/nodes/convert.cpp index 2f3fa0d1b67..6e6e2c9f440 100644 --- a/src/plugins/intel_cpu/src/nodes/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/convert.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include using namespace dnnl; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index 013bf993280..a7664b809a3 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -21,7 +21,7 @@ #include #include #include -#include +#include #if defined(OV_CPU_WITH_ACL) #include "executors/acl/acl_utils.hpp" @@ -121,7 +121,7 @@ bool DeconvKey::operator==(const DeconvKey &rhs) const { /** * Deconvolution shape inference factory. It defines the input mask depending on the existence of the `output_shape` input. * Since in case it exists, plugin should pass the input data to the shape inference function. - * + * */ class DeconfolutionShapeInferFactory : public ShapeInferFactory { public: diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/eltwise.cpp index 5babd5287d6..d163443057d 100644 --- a/src/plugins/intel_cpu/src/nodes/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/eltwise.cpp @@ -46,6 +46,7 @@ #include #include #include "memory_desc/dnnl_blocked_memory_desc.h" +#include "shape_inference/custom/eltwise.hpp" using namespace InferenceEngine; using namespace dnnl::impl::utils; @@ -927,64 +928,6 @@ private: #endif // OPENVINO_ARCH_X86_64 -namespace { - -/** - * Implements Eltwise shape inference algorithm. The algorithm is based on broadcasting all the input shapes - * according to the NUMPY broadcast rule. This implementation is more lightweight than the ngraph one. 
- * - */ -class EltwiseShapeInfer : public ShapeInferEmptyPads { -public: - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - size_t max_rank = 0; - size_t max_rank_idx = 0; - for (size_t i = 0; i < input_shapes.size(); ++i) { - auto item_rank = input_shapes[i].get().size(); - if (item_rank > max_rank) { - max_rank = item_rank; - max_rank_idx = i; - } - } - auto output_shape = input_shapes[max_rank_idx].get(); - // use NUMPY broadcast rule - for (size_t i = 0; i < input_shapes.size(); i++) { - if (i == max_rank_idx) - continue; - - auto& input_shape = input_shapes[i].get(); - if (input_shape.size() > output_shape.size()) { - IE_THROW() << "Eltwise shape infer input and output shapes rank mismatch"; - } - size_t offset = output_shape.size() - input_shape.size(); - for (size_t j = 0; j < input_shape.size(); ++j) { - if (input_shape[j] != output_shape[offset + j]) { - if (output_shape[offset + j] == 1) { - output_shape[offset + j] = input_shape[j]; - } else { - if (input_shape[j] != 1) IE_THROW() << "Eltwise shape infer input shapes dim index: " << j << " mismatch"; - } - } - } - } - return { { std::move(output_shape) }, ShapeInferStatus::success }; - } - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } -}; - -class EltwiseShapeInferFactory : public ShapeInferFactory { -public: - ShapeInferPtr makeShapeInfer() const override { - return std::make_shared(); - } -}; - -} // namespace - Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shared_ptr& op) { const auto const1 = ov::as_type_ptr(op->get_input_node_shared_ptr(0)); const auto const2 = ov::as_type_ptr(op->get_input_node_shared_ptr(1)); diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index 747e89bdc1e..a17486e7de9 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include #define THROW_ERROR IE_THROW() << NameFromType(getType()) << " node with name '" << getName() << "' " diff --git a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp index 283a6555be3..c03f6b16a27 100644 --- a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp +++ b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp @@ -23,7 +23,7 @@ #include "memory_desc/dnnl_blocked_memory_desc.h" #include "common/cpu_memcpy.h" #include -#include +#include #include #include "utils/ngraph_utils.hpp" diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 9ba336b66d4..f85f2c15a93 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -26,6 +26,7 @@ #include "common/primitive_desc.hpp" #include "common/primitive_desc_iface.hpp" #include "common/cpu_convert.h" +#include "shape_inference/custom/fullyconnected.hpp" #include #include @@ -92,53 +93,6 @@ bool FCKey::operator==(const FCKey &rhs) const { return retVal; } -class FCShapeInfer : public ShapeInferEmptyPads { -public: - FCShapeInfer(size_t outPut_rank) : out_rank(outPut_rank) {} - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - const VectorDims& activationShape = input_shapes[0].get(); - const VectorDims& weightShape = input_shapes[1].get(); - size_t activationRank = activationShape.size(); - size_t channelRank = weightShape.size() - 1; - - 
// activation weight output_shape - // NCHW CoCHW NCo - // TNC CoC TNCo - // NC CoC NCo - VectorDims outputShape(out_rank, 1); - // set Co - outputShape.back() = weightShape[0]; - // set batch dims - size_t batchRank = activationRank - channelRank; - size_t startIdx = out_rank - batchRank - 1; - for (size_t i = 0; i < batchRank; i++) { - outputShape[i + startIdx] = activationShape[i]; - } - - return {{std::move(outputShape)}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } - -private: - size_t out_rank = 0; -}; - -class FCShapeInferFactory : public ShapeInferFactory { -public: - FCShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - return std::make_shared(m_op->get_output_partial_shape(0).rank().get_length()); - } - -private: - std::shared_ptr m_op; -}; - } // namespace bool FullyConnected::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp index 06314ca17c6..cf54fcd0b6f 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather.cpp @@ -11,8 +11,8 @@ #include "common/cpu_memcpy.h" #include #include "kernels/x64/gather_uni_kernel.hpp" -#include "utils/shape_inference/shape_inference_cpu.hpp" #include +#include "shape_inference/custom/gather.hpp" using namespace InferenceEngine; using namespace dnnl::impl::cpu; @@ -43,76 +43,6 @@ bool Gather::isSupportedOperation(const std::shared_ptr& op, std return true; } -namespace { -class GatherShapeInfer : public ShapeInferEmptyPads { -public: - GatherShapeInfer(bool isAxisInputConst, bool isIndicesScalar, int axis, int batchDims) : m_isAxisInputConst(isAxisInputConst), - m_isIndicesScalar(isIndicesScalar), m_axis(axis), m_batchDims(batchDims) {} - Result infer(const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - static constexpr size_t GATHER_DATA = 0, GATHER_INDICES = 1, GATHER_AXIS = 2; - - const auto& input_shape = input_shapes[GATHER_DATA].get(); - - // Use VectorDims{} instead of {1} for Scalar - const auto& indices_shape = m_isIndicesScalar ? 
VectorDims{} : input_shapes[GATHER_INDICES].get(); - - if (!m_isAxisInputConst) { - if (data_dependency.at(GATHER_AXIS)->getDesc().getPrecision() != Precision::I32) { - IE_THROW() << "Unsupported precision " << data_dependency.at(GATHER_AXIS)->getDesc().getPrecision() - << " for axis tensor."; - } - m_axis = reinterpret_cast(data_dependency.at(GATHER_AXIS)->getData())[0]; - } - - if (m_axis < 0) - m_axis += input_shape.size(); - if (m_batchDims < 0) - m_batchDims += indices_shape.size(); - - VectorDims output_shape; - output_shape.reserve(input_shape.size() + indices_shape.size() - m_batchDims - 1); - output_shape.insert(output_shape.end(), input_shape.begin(), input_shape.begin() + m_axis); - output_shape.insert(output_shape.end(), indices_shape.begin() + m_batchDims, indices_shape.end()); - output_shape.insert(output_shape.end(), input_shape.begin() + m_axis + 1, input_shape.end()); - - return {{std::move(output_shape)}, ShapeInferStatus::success}; - } - port_mask_t get_port_mask() const override { - return PortMask(2); - } - -private: - bool m_isAxisInputConst = false; - bool m_isIndicesScalar = false; - int m_axis = 0; - int m_batchDims = 0; -}; - -class GatherShapeInferFactory : public ShapeInferFactory { -public: - GatherShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - static constexpr size_t GATHER_INDICES = 1, GATHER_AXIS = 2; - - bool isAxisInputConst = ov::is_type(m_op->get_input_node_ptr(GATHER_AXIS)); - const auto& indicesShape = m_op->get_input_partial_shape(GATHER_INDICES); - if (!indicesShape.rank().is_static()) - IE_THROW() << "indicesShape do not support dynamic rank."; - bool isIndicesScalar = indicesShape.rank().get_length() == 0; - - int axis = isAxisInputConst ? ov::as_type(m_op->get_input_node_ptr(GATHER_AXIS))->cast_vector()[0] : 0; - int batchDims = ov::is_type(m_op) ? static_cast(ov::as_type_ptr(m_op)->get_batch_dims()) : ( - ov::is_type(m_op) ? 
static_cast(ov::as_type_ptr(m_op)->get_batch_dims()) : 0); - - return std::make_shared(isAxisInputConst, isIndicesScalar, axis, batchDims); - } - -private: - std::shared_ptr m_op; -}; -} // namespace - Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, GatherShapeInferFactory(op)), batchDims(0) { diff --git a/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp b/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp index 9e9e5bf2ffe..7e7c8062ec7 100644 --- a/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp +++ b/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp @@ -18,7 +18,7 @@ #include "ie_parallel.hpp" #include "common/cpu_memcpy.h" #include "generate_proposals.h" -#include +#include namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/nodes/if.cpp b/src/plugins/intel_cpu/src/nodes/if.cpp index 69b3c7baab7..8918afa3d06 100644 --- a/src/plugins/intel_cpu/src/nodes/if.cpp +++ b/src/plugins/intel_cpu/src/nodes/if.cpp @@ -8,7 +8,7 @@ #include "ie_ngraph_utils.hpp" #include "transformations/utils/utils.hpp" #include "common/cpu_memcpy.h" -#include +#include #include #include diff --git a/src/plugins/intel_cpu/src/nodes/input.cpp b/src/plugins/intel_cpu/src/nodes/input.cpp index 542f0cfcaa2..8ae0ddc46b2 100644 --- a/src/plugins/intel_cpu/src/nodes/input.cpp +++ b/src/plugins/intel_cpu/src/nodes/input.cpp @@ -21,7 +21,7 @@ #include "utils/cpu_utils.hpp" #include #include "memory_desc/dnnl_blocked_memory_desc.h" -#include "utils/shape_inference/shape_inference_pass_through.hpp" +#include "shape_inference/shape_inference_pass_through.hpp" using namespace dnnl; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/interpolate.cpp b/src/plugins/intel_cpu/src/nodes/interpolate.cpp index a71f0df5b5f..60a1259f43a 100644 --- a/src/plugins/intel_cpu/src/nodes/interpolate.cpp +++ b/src/plugins/intel_cpu/src/nodes/interpolate.cpp @@ -26,11 +26,11 @@ #include #include #include -#include -#include +#include +#include #include #include "utils/cpu_utils.hpp" -#include +#include using namespace dnnl; using namespace InferenceEngine; @@ -3976,4 +3976,4 @@ bool Interpolate::created() const { } // namespace node } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/lrn.cpp b/src/plugins/intel_cpu/src/nodes/lrn.cpp index 2dd2ebdd515..fa7aa2bddc9 100644 --- a/src/plugins/intel_cpu/src/nodes/lrn.cpp +++ b/src/plugins/intel_cpu/src/nodes/lrn.cpp @@ -9,7 +9,7 @@ #include #include "memory_desc/dnnl_blocked_memory_desc.h" #include -#include +#include #include #include diff --git a/src/plugins/intel_cpu/src/nodes/mathematics.cpp b/src/plugins/intel_cpu/src/nodes/mathematics.cpp index 926e09fd977..b8662b044bf 100644 --- a/src/plugins/intel_cpu/src/nodes/mathematics.cpp +++ b/src/plugins/intel_cpu/src/nodes/mathematics.cpp @@ -10,7 +10,7 @@ #include "ie_parallel.hpp" #include "mathematics.h" #include "utils/general_utils.h" -#include +#include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/matmul.cpp b/src/plugins/intel_cpu/src/nodes/matmul.cpp index 924fabe32d8..69e0b2c9bec 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.cpp +++ b/src/plugins/intel_cpu/src/nodes/matmul.cpp @@ -22,7 +22,7 @@ #include #include #include - +#include "shape_inference/custom/matmul.hpp" using namespace dnnl; using namespace InferenceEngine; @@ -114,80 +114,6 @@ bool MatMul::isSupportedOperation(const std::shared_ptr& op, 
return true; } -namespace { -class MMShapeInfer : public ShapeInferEmptyPads { -public: - MMShapeInfer(const size_t& out_rank, const bool& transpose_a, const bool& transpose_b) : - m_out_rank(out_rank), m_transpose_a(transpose_a), m_transpose_b(transpose_b) { - m_shapeY = VectorDims(m_out_rank, 1); // for output and cache - } - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - const VectorDims& shapeA = input_shapes[0].get(); - const VectorDims& shapeB = input_shapes[1].get(); - const size_t rankA = shapeA.size(); - const size_t rankB = shapeB.size(); - - // getSupportedDescriptors has done some shape check. - // 1. Needn't assert the scalar type since the matmul_shape_inference has checked. - // 2. Needn't check the compatibility of the last two dims - // 3. 1-D x 1-D is needed - // 4. transpose is necessary - // 5. Just support the same rank of matmul - // 6. simplify the broadcast check - if (rankA == 1 && rankB == 1 && shapeA[0] == shapeB[0]) { - return {{m_shapeY}, ShapeInferStatus::success}; - } - - m_shapeY[m_out_rank-2] = m_transpose_a ? shapeA[rankA-1] : shapeA[rankA-2]; - m_shapeY[m_out_rank-1] = m_transpose_b ? shapeB[rankB-2] : shapeB[rankB-1]; - - for (size_t i=0; i < m_out_rank-2; ++i) { - if (shapeA[i] != shapeB[i]) { - if (shapeB[i] == 1) { - m_shapeY[i] = shapeA[i]; - continue; - } else if (shapeA[i] != 1) { - IE_THROW() << "Incompatible MatMul batch dimension. Cant merge the first input dimension=" << - shapeA[i] << " with second input dimension=" << shapeB[i] << " at index=" << i; - } - } - m_shapeY[i] = shapeB[i]; - } - - return {{m_shapeY}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } - -private: - VectorDims m_shapeY; - const size_t m_out_rank; - const bool m_transpose_a; - const bool m_transpose_b; -}; - -class MMShapeInferFactory : public ShapeInferFactory { -public: - MMShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - if (const auto matmul = ov::as_type_ptr(m_op)) { - const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length(); - const bool transpose_a = matmul->get_transpose_a(); - const bool transpose_b = matmul->get_transpose_b(); - return std::make_shared(output_rank, transpose_a, transpose_b); - } else { - IE_THROW() << "Unexpected operation type in the MatMul shape inference factory"; - } - } -private: - std::shared_ptr m_op; -}; -} // namespace - MatMul::MatMul(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, MMShapeInferFactory(op)), withBiases(false) { std::string errorMessage; diff --git a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp index 1e910d04e76..9a3221e7261 100644 --- a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp @@ -14,7 +14,7 @@ #include "ie_parallel.hpp" #include "ngraph/opsets/opset8.hpp" #include "utils/general_utils.h" -#include +#include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp index 87803b746fc..72a252b9b45 100644 --- a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp @@ -17,7 +17,7 @@ #include "ie_parallel.hpp" #include "utils/general_utils.h" -#include +#include using namespace InferenceEngine; diff --git 
a/src/plugins/intel_cpu/src/nodes/ngram.cpp b/src/plugins/intel_cpu/src/nodes/ngram.cpp index 917f3f1ef70..1b3552b5100 100644 --- a/src/plugins/intel_cpu/src/nodes/ngram.cpp +++ b/src/plugins/intel_cpu/src/nodes/ngram.cpp @@ -9,43 +9,11 @@ #include "ie_parallel.hpp" #include "common/cpu_memcpy.h" #include "transformations/cpu_opset/common/op/ngram.hpp" +#include "shape_inference/custom/ngram.hpp" namespace ov { namespace intel_cpu { namespace node { -namespace { -class NgramShapeInfer : public ShapeInferEmptyPads { -public: - NgramShapeInfer(const size_t k) : m_k(k) {} - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - auto output_shape = input_shapes[0].get(); - output_shape[1] *= m_k; - return {{std::move(output_shape)}, ShapeInferStatus::success}; - } - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } - -private: - size_t m_k; -}; - -class NgramShapeInferFactory : public ShapeInferFactory { -public: - NgramShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - auto ngram = ov::as_type_ptr(m_op); - if (!ngram) { - IE_THROW(Unexpected) << "Wrong operation type"; - } - return std::make_shared(ngram->get_k()); - } -private: - std::shared_ptr m_op; -}; -} // namespace bool Ngram::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { diff --git a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp index baeeb5ff091..bfd1d8fa982 100644 --- a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp @@ -18,7 +18,7 @@ #include "cpu/x64/jit_generator.hpp" #include "emitters/x64/jit_load_store_emitters.hpp" #include -#include +#include using namespace InferenceEngine; using namespace dnnl; diff --git a/src/plugins/intel_cpu/src/nodes/non_zero.cpp b/src/plugins/intel_cpu/src/nodes/non_zero.cpp index cbb0b134211..4571eaa9e8c 100644 --- a/src/plugins/intel_cpu/src/nodes/non_zero.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_zero.cpp @@ -9,7 +9,7 @@ #include #include #include -#include +#include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp index fd25752af59..c4fc60a9d9e 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.cpp +++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp @@ -23,7 +23,7 @@ #include "memory_desc/dnnl_blocked_memory_desc.h" #include "utils/cpu_utils.hpp" #include -#include +#include using namespace dnnl; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/one_hot.cpp b/src/plugins/intel_cpu/src/nodes/one_hot.cpp index 5eefbf01313..de3c1e55af5 100644 --- a/src/plugins/intel_cpu/src/nodes/one_hot.cpp +++ b/src/plugins/intel_cpu/src/nodes/one_hot.cpp @@ -11,9 +11,8 @@ #include #include #include -#include -#include #include "common/cpu_memcpy.h" +#include "shape_inference/custom/one_hot.hpp" using namespace InferenceEngine; @@ -21,58 +20,6 @@ namespace ov { namespace intel_cpu { namespace node { -namespace { -/** - * Implements One Hot shape inference algorithm. The output shape is the input `indices` tensor shape, where a new axis - * of size `depth` is inserted at the dimension defined by the `axis` parameter. 
- * - */ -class OneHotShapeInfer : public ShapeInferEmptyPads { -public: - explicit OneHotShapeInfer(int64_t axis) : m_axis(axis) {} - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - auto depth = reinterpret_cast(data_dependency.at(1)->getData())[0]; - - auto result = input_shapes.front().get(); - result.insert(result.begin() + m_axis, depth); - - return {{std::move(result)}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return PortMask(1); - } - -private: - int64_t m_axis = 0; -}; - -class OneHotShapeInferFactory : public ShapeInferFactory { -public: - OneHotShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - auto oneHot = ov::as_type_ptr(m_op); - if (!oneHot) { - IE_THROW() << "Unexpected op type in OneHot shape inference factory: " << m_op->get_type_name(); - } - auto axis = oneHot->get_axis(); - auto dstShape = oneHot->get_output_partial_shape(0); - int output_dims_size = dstShape.size(); - if (0 == output_dims_size) output_dims_size = 1; - if (axis < 0) { - axis += output_dims_size; - } - return std::make_shared(axis); - } - -private: - std::shared_ptr m_op; -}; - -} // namespace - bool OneHot::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { const auto oneHot = std::dynamic_pointer_cast(op); diff --git a/src/plugins/intel_cpu/src/nodes/priorbox.cpp b/src/plugins/intel_cpu/src/nodes/priorbox.cpp index f1832c37bd7..edc1db859f2 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox.cpp @@ -13,6 +13,7 @@ #include #include #include +#include "shape_inference/custom/priorbox.hpp" using namespace InferenceEngine; @@ -22,49 +23,6 @@ namespace ov { namespace intel_cpu { namespace node { namespace { -class PriorBoxShapeInfer : public ShapeInferEmptyPads { -/** - * Implements Prior Box Clustered shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors]. - * `number_of_priors` is an attribute of the operation. heigh and width are in the the first input parameter. - * - */ -public: - explicit PriorBoxShapeInfer(int64_t number_of_priors) : m_number_of_priors(number_of_priors) {} - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - const int* in_data = reinterpret_cast(data_dependency.at(0)->getData()); - const int H = in_data[0]; - const int W = in_data[1]; - const auto output = static_cast(4 * H * W * m_number_of_priors); - return {{{2, output}}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return PortMask(0); - } - -private: - int64_t m_number_of_priors = 0; -}; - -class PriorBoxShapeInferFactory : public ShapeInferFactory { -public: - explicit PriorBoxShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - auto priorBox = ov::as_type_ptr(m_op); - if (!priorBox) { - IE_THROW() << "Unexpected op type in PriorBox shape inference factory: " << m_op->get_type_name(); - } - const auto& attrs = priorBox->get_attrs(); - auto number_of_priors = ngraph::opset1::PriorBox::number_of_priors(attrs); - return std::make_shared(number_of_priors); - } - -private: - std::shared_ptr m_op; -}; - float clip_great(float x, float threshold) { return x < threshold ? 
x : threshold; } diff --git a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp index 2ab36190b85..3f8fa86bc63 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp @@ -13,59 +13,13 @@ #include #include #include +#include "shape_inference/custom/priorbox_clustered.hpp" using namespace InferenceEngine; namespace ov { namespace intel_cpu { namespace node { - -namespace { -/** - * Implements Prior Box Clustered shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors]. - * `number_of_priors` is an attribute of the operation. heigh and width are in the the first input parameter. - * - */ -class PriorBoxClusteredShapeInfer : public ShapeInferEmptyPads { -public: - explicit PriorBoxClusteredShapeInfer(size_t number_of_priors) : m_number_of_priors(number_of_priors) {} - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - const int* in_data = reinterpret_cast(data_dependency.at(0)->getData()); - const int H = in_data[0]; - const int W = in_data[1]; - const auto output = static_cast(4 * H * W * m_number_of_priors); - return {{{2, output}}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return PortMask(0); - } - -private: - size_t m_number_of_priors = 0; -}; - -class PriorBoxClusteredShapeInferFactory : public ShapeInferFactory { -public: - explicit PriorBoxClusteredShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - auto priorBox = ov::as_type_ptr(m_op); - if (!priorBox) { - IE_THROW() << "Unexpected op type in PriorBoxClustered shape inference factory: " << m_op->get_type_name(); - } - const auto& attrs = priorBox->get_attrs(); - auto number_of_priors = attrs.widths.size(); - return std::make_shared(number_of_priors); - } - -private: - std::shared_ptr m_op; -}; - -} // namespace - bool PriorBoxClustered::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { const auto priorBox = std::dynamic_pointer_cast(op); diff --git a/src/plugins/intel_cpu/src/nodes/range.cpp b/src/plugins/intel_cpu/src/nodes/range.cpp index c7b47e55449..a28665b9666 100644 --- a/src/plugins/intel_cpu/src/nodes/range.cpp +++ b/src/plugins/intel_cpu/src/nodes/range.cpp @@ -7,7 +7,7 @@ #include "ie_parallel.hpp" #include "range.h" #include -#include +#include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/reorder.cpp b/src/plugins/intel_cpu/src/nodes/reorder.cpp index bb1be492515..14c1cda0468 100644 --- a/src/plugins/intel_cpu/src/nodes/reorder.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorder.cpp @@ -16,7 +16,7 @@ #include "nodes/common/reorder_prim.h" #include "convert.h" #include -#include +#include using namespace dnnl; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/reshape.cpp b/src/plugins/intel_cpu/src/nodes/reshape.cpp index 58b59b0dbfa..e271d961308 100644 --- a/src/plugins/intel_cpu/src/nodes/reshape.cpp +++ b/src/plugins/intel_cpu/src/nodes/reshape.cpp @@ -9,9 +9,7 @@ #include #include #include -#include -#include -#include "utils/shape_inference/shape_inference_cpu.hpp" +#include "shape_inference/custom/reshape.hpp" #include "common/cpu_memcpy.h" @@ -36,196 +34,6 @@ bool Reshape::isSupportedOperation(const std::shared_ptr& op return true; } -namespace { -class ReshapeShapeInfer : public ShapeInferEmptyPads { 
-public: - ReshapeShapeInfer(bool specialZero) : m_specialZero(specialZero) {} - Result infer(const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - static constexpr size_t RESHAPE_SRC = 0, RESHAPE_PATTERN = 1; - const auto& inputShape = input_shapes[RESHAPE_SRC].get(); - const size_t inputShapeSize = inputShape.size(); - const auto memPtr = data_dependency.at(RESHAPE_PATTERN); - const auto data = memPtr->getData(); - const auto& dims = memPtr->getStaticDims(); - const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); - std::vector outPattern = ov::get_raw_data_as( - InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()), - data, - outputPatternSize, - ov::util::Cast()); - VectorDims outputShape(outputPatternSize); - size_t outputProduct = 1; - int32_t minusOneIdx = -1; - int32_t minusOneCount = 0; - for (int32_t i = 0; i < outputPatternSize; ++i) { - if (outPattern[i] == 0 && m_specialZero && i < static_cast(inputShapeSize)) { - outputShape[i] = inputShape[i]; - } else if (outPattern[i] == -1) { - minusOneIdx = i; - minusOneCount++; - } else { - outputShape[i] = outPattern[i]; - outputProduct *= outputShape[i]; - } - } - size_t inputProduct = 1; - for (size_t i = 0; i < inputShapeSize; ++i) { - if (static_cast(i) < outputPatternSize && outPattern[i] == 0 && m_specialZero) - continue; - inputProduct *= inputShape[i]; - } - if (minusOneIdx >= 0) { - if (outputProduct != 0) { - outputShape[minusOneIdx] = inputProduct / outputProduct; - outputProduct *= outputShape[minusOneIdx]; - } else { - outputShape[minusOneIdx] = 0; - } - } - if (minusOneCount > 1 || inputProduct != outputProduct) { - IE_THROW(Unexpected) << "[cpu]reshape: the shape of input data conflicts with the reshape pattern"; - } - return {{std::move(outputShape)}, ShapeInferStatus::success}; - } - port_mask_t get_port_mask() const override { - return PortMask(1); - } - -private: - bool m_specialZero; -}; - -class SqueezeShapeInfer : public ShapeInferEmptyPads { -public: - SqueezeShapeInfer() {} - Result infer(const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - static constexpr size_t SQUEEZE_SRC = 0, SQUEEZE_PATTERN = 1; - const auto& inputShape = input_shapes[SQUEEZE_SRC].get(); - const size_t inputShapeSize = inputShape.size(); - auto itr = data_dependency.find(SQUEEZE_PATTERN); - VectorDims outputShape; - outputShape.reserve(inputShapeSize); - if (itr != data_dependency.end()) { - const auto memPtr = data_dependency.at(SQUEEZE_PATTERN); - const auto data = memPtr->getData(); - const auto& dims = memPtr->getStaticDims(); - const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); - std::vector outPattern = ov::get_raw_data_as( - InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()), - data, - outputPatternSize, - ov::util::Cast()); - std::vector removeMask(inputShapeSize, false); - bool existError = false; - for (int i = 0; i < outputPatternSize; i++) { - if (outPattern[i] < 0) { - outPattern[i] = inputShapeSize + outPattern[i]; - } - if (outPattern[i] >= 0 && outPattern[i] < static_cast(inputShapeSize)) { - removeMask[outPattern[i]] = true; - } else { - existError = true; - break; - } - } - for (size_t i = 0; i < inputShapeSize; i++) { - if (!removeMask[i]) { - outputShape.push_back(inputShape[i]); - } else if (inputShape[i] != 1) { - existError = true; - break; - } - } - if (existError) { - IE_THROW(Unexpected) << "[cpu]squeeze: 
the shape of input data conflict with the squeeze pattern"; - } - } else { - for (size_t i = 0; i < inputShapeSize; i++) { - if (inputShape[i] != 1) { - outputShape.push_back(inputShape[i]); - } - } - } - return {{std::move(outputShape)}, ShapeInferStatus::success}; - } - port_mask_t get_port_mask() const override { - return PortMask(1); - } -}; - -class UnsqueezeShapeInfer : public ShapeInferEmptyPads { -public: - UnsqueezeShapeInfer() {} - Result infer(const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - static constexpr size_t UNSQUEEZE_SRC = 0, UNSQUEEZE_PATTERN = 1; - const auto& inputShape = input_shapes[UNSQUEEZE_SRC].get(); - const size_t inputShapeSize = inputShape.size(); - const auto memPtr = data_dependency.at(UNSQUEEZE_PATTERN); - const auto data = memPtr->getData(); - const auto& dims = memPtr->getStaticDims(); - const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); - std::vector outPattern = ov::get_raw_data_as( - InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()), - data, - outputPatternSize, - ov::util::Cast()); - size_t outputShapeSize = inputShapeSize + outputPatternSize; - VectorDims outputShape(outputShapeSize, 0); - bool existError = false; - for (int i = 0; i < outputPatternSize; i++) { - if (outPattern[i] < 0) { - outPattern[i] = outputShapeSize + outPattern[i]; - } - if (outPattern[i] >= 0 && outPattern[i] < static_cast(outputShapeSize)) { - outputShape[outPattern[i]] = 1; - } else { - existError = true; - break; - } - } - for (size_t i = 0, y = 0; i < outputShapeSize; i++) { - if (outputShape[i] == 0) { - if (y < inputShapeSize) { - outputShape[i] = inputShape[y]; - y++; - } else { - existError = true; - break; - } - } - } - if (existError) { - IE_THROW(Unexpected) << "[cpu]unsqueeze: the shape of input data conflicts with the unsqueeze pattern"; - } - return {{std::move(outputShape)}, ShapeInferStatus::success}; - } - port_mask_t get_port_mask() const override { - return PortMask(1); - } -}; - -class ReshapeShapeInferFactory : public ShapeInferFactory { -public: - ReshapeShapeInferFactory(std::shared_ptr op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - if (const auto reshapeOp = ov::as_type_ptr(m_op)) { - return std::make_shared(reshapeOp->get_special_zero()); - } else if (ov::is_type(m_op)) { - return std::make_shared(); - } else if (ov::is_type(m_op)) { - return std::make_shared(); - } else { - IE_THROW(Unexpected) << "[cpu]reshape: " << m_op->get_type_name() << "is not implemented"; - } - } -private: - std::shared_ptr m_op; -}; -} // namespace - Reshape::Reshape(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, ReshapeShapeInferFactory(op)) { std::string errorMessage; diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp index 19f4fedb87a..9992f0f392b 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.cpp +++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp @@ -13,7 +13,7 @@ #include "memory_desc/dnnl_blocked_memory_desc.h" #include #include -#include +#include #include "transformations/utils/utils.hpp" #include "ov_ops/augru_cell.hpp" @@ -351,9 +351,9 @@ bool RNN::testNativeOrder(const std::shared_ptr& op) { namespace { /** - * Extends Rnn ngraph shape inference implementation. The main purpose of this class is to do the trick with + * Extends Rnn ngraph shape inference implementation. 
The main purpose of this class is to do the trick with * dimentions permutation, necessary due to the mismatch between the ngrpah and the oneDNN RNN node descriptions. - * + * */ class RnnShapeInfer : public NgraphShapeInfer { public: diff --git a/src/plugins/intel_cpu/src/nodes/shapeof.cpp b/src/plugins/intel_cpu/src/nodes/shapeof.cpp index e3b9a8bcc81..77cb5c614db 100644 --- a/src/plugins/intel_cpu/src/nodes/shapeof.cpp +++ b/src/plugins/intel_cpu/src/nodes/shapeof.cpp @@ -4,7 +4,7 @@ #include "shapeof.h" #include -#include +#include "shape_inference/custom/shapeof.hpp" using namespace InferenceEngine; @@ -12,35 +12,6 @@ namespace ov { namespace intel_cpu { namespace node { -namespace { -/** - * Implements Shape Of shape inference algorithm. The output shape is simply a 1D tensor with the size of the input tensor - * rank. - * - */ -class ShapeOfShapeInfer : public ShapeInferEmptyPads { -public: - ShapeOfShapeInfer() = default; - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - IE_ASSERT(!input_shapes.empty()); - return {{VectorDims{input_shapes.front().get().size()}}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } -}; - -class ShapeOfShapeInferFactory : public ShapeInferFactory { -public: - ShapeInferPtr makeShapeInfer() const override { - return std::make_shared(); - } -}; -} // namespace - bool ShapeOf::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { if (!one_of(op->get_type_info(), diff --git a/src/plugins/intel_cpu/src/nodes/softmax.cpp b/src/plugins/intel_cpu/src/nodes/softmax.cpp index 6d9768492c0..51f3e2fba98 100644 --- a/src/plugins/intel_cpu/src/nodes/softmax.cpp +++ b/src/plugins/intel_cpu/src/nodes/softmax.cpp @@ -11,7 +11,7 @@ #include #include "memory_desc/dnnl_blocked_memory_desc.h" #include -#include +#include using namespace dnnl; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp index aa4dae10df7..83eb62d4ad6 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp @@ -8,8 +8,9 @@ #include "common/cpu_memcpy.h" #include "input.h" #include -#include +#include #include "slice_shape_inference_utils.hpp" +#include "shape_inference/custom/strided_slice.hpp" #include @@ -34,111 +35,6 @@ bool StridedSlice::isSupportedOperation(const std::shared_ptr& o return true; } -namespace { - -constexpr IShapeInfer::port_mask_t port_mask = PortMask(/*BEGIN_ID*/1, /*END_ID*/2, /*STRIDE_ID*/3, /*AXES_ID*/4); - -class StridedSliceShapeInfer : public ShapeInferEmptyPads { -public: - StridedSliceShapeInfer(size_t output_size, - std::unordered_set begin_mask, - std::unordered_set end_mask, - std::unordered_set new_axis_mask, - std::unordered_set shrink_axis_mask) - : m_outputShape(output_size, 1), - m_begin_mask_set(std::move(begin_mask)), - m_end_mask_set(std::move(end_mask)), - m_new_axis_mask_set(std::move(new_axis_mask)), - m_shrink_axis_mask_set(std::move(shrink_axis_mask)) {} - - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - // align with intel_cpu::node::StridedSlice - static constexpr size_t DATA_ID = 0, BEGIN_ID = 1, END_ID = 2, STRIDE_ID = 3; - const VectorDims& shapeIn = input_shapes[DATA_ID].get(); - const VectorDims& shapeBegin = input_shapes[BEGIN_ID].get(); - if 
(data_dependency.at(BEGIN_ID)->getDesc().getPrecision() != Precision::I32 || - data_dependency.at(END_ID)->getDesc().getPrecision() != Precision::I32 || - data_dependency.at(STRIDE_ID)->getDesc().getPrecision() != Precision::I32) { - IE_THROW(Unexpected) << "The data type of begin/end/stride is NOT I32, which is unexpected!"; - } - auto beginPtr = reinterpret_cast(data_dependency.at(BEGIN_ID)->getData()); - auto endPtr = reinterpret_cast(data_dependency.at(END_ID)->getData()); - auto stridePtr = reinterpret_cast(data_dependency.at(STRIDE_ID)->getData()); - - for (size_t i = 0, new_idx = 0; i < shapeIn.size(); ++i) { - if (m_new_axis_mask_set.count(i)) { - // deal with new_axis_mask - m_outputShape[new_idx] = 1; - m_outputShape[new_idx+1] = shapeIn[i]; - new_idx+=2; - } else if (!m_shrink_axis_mask_set.count(i)) { - // deal with begin_mask and end_mask - if ((i >= shapeBegin[0]) || (shapeIn[i] == 0)) { - m_outputShape[new_idx] = shapeIn[i]; - } else { - auto begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i]; - auto end = m_end_mask_set.count(i) ? shapeIn[i] : endPtr[i]; - m_outputShape[new_idx] = ov::op::slice::get_sliced_value(shapeIn[i], begin, end, stridePtr[i]); - } - new_idx += 1; - } - } - return {{m_outputShape}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return port_mask; - } - -private: - VectorDims m_outputShape; - const std::unordered_set m_begin_mask_set; - const std::unordered_set m_end_mask_set; - const std::unordered_set m_new_axis_mask_set; - const std::unordered_set m_shrink_axis_mask_set; -}; - -class StridedSliceShapeInferFactory : public ShapeInferFactory { -public: - StridedSliceShapeInferFactory(const std::shared_ptr& op) - : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { - if (const auto Slice_op = ov::as_type_ptr(m_op)) { - return std::make_shared(make_shape_inference(m_op), port_mask); - } else if (const auto StridedSlice_op = ov::as_type_ptr(m_op)) { - const auto& ellipsis_mask = StridedSlice_op->get_ellipsis_mask(); - if (std::any_of(ellipsis_mask.begin(), ellipsis_mask.end(), [](int64_t x){ return x == 1; })) { - return std::make_shared(make_shape_inference(m_op), port_mask); - } else { - auto vec_to_set = [](const std::vector& vec){ - std::unordered_set to_set; - for (size_t i = 0; i < vec.size(); ++i) { - if (vec[i] == 1) { - to_set.emplace(i); - } - } - return to_set; - }; - return std::make_shared( - m_op->get_output_partial_shape(0).rank().get_length(), - vec_to_set(StridedSlice_op->get_begin_mask()), - vec_to_set(StridedSlice_op->get_end_mask()), - vec_to_set(StridedSlice_op->get_new_axis_mask()), - vec_to_set(StridedSlice_op->get_shrink_axis_mask())); - } - } else { - IE_THROW(NotImplemented) << "not Slice or StridedSlice"; - } - } - -private: - const std::shared_ptr m_op; -}; - -} // namespace - StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, StridedSliceShapeInferFactory(op)) { std::string errorMessage; diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index c6e8f4c0316..ace2064e6b4 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -33,6 +33,7 @@ #include "transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.hpp" #include "transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp" #include "transformations/defs.hpp" +#include "shape_inference/custom/subgraph.hpp" using namespace InferenceEngine; using 
namespace dnnl::impl::utils; @@ -43,38 +44,6 @@ using namespace Xbyak; namespace ov { namespace intel_cpu { namespace node { -namespace { - -/* This class implementation is a temporal WA - TODO: revise the implementation to remove the node reference*/ -class SnippetShapeInfer : public ShapeInferEmptyPads { -public: - SnippetShapeInfer(Snippet* node) : m_node(node) {} - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - return {m_node->shapeInfer(), ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } - -private: - Snippet* m_node; -}; - -class SnippetShapeInferFactory : public ShapeInferFactory { -public: - SnippetShapeInferFactory(Snippet* node) : m_node(node) {} - ShapeInferPtr makeShapeInfer() const override { - return std::make_shared(m_node); - } - -private: - Snippet* m_node; -}; -} // namespace Snippet::Snippet(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, SnippetShapeInferFactory(this)) { diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp index be139aaf8d0..b38ae2fde7e 100644 --- a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp +++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp @@ -14,7 +14,7 @@ #include "transformations/utils/utils.hpp" #include "common/cpu_memcpy.h" #include "common/reorder_prim.h" -#include +#include using namespace dnnl; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/src/nodes/transpose.cpp b/src/plugins/intel_cpu/src/nodes/transpose.cpp index 5bc50241016..2652447d0ac 100644 --- a/src/plugins/intel_cpu/src/nodes/transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/transpose.cpp @@ -10,7 +10,7 @@ #include #include #include - +#include "shape_inference/custom/transpose.hpp" using namespace dnnl; using namespace InferenceEngine; @@ -37,70 +37,6 @@ bool Transpose::isSupportedOperation(const std::shared_ptr& op, return true; } -namespace { -class TransposeDynShapeInfer : public ShapeInferEmptyPads { -public: - TransposeDynShapeInfer() = default; - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - IE_THROW(NotImplemented) << "TODO: Support parameterized Order input for dynamic shapes."; - } - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } -private: -}; - -class TransposeShapeInfer : public ShapeInferEmptyPads { -public: - TransposeShapeInfer(const size_t& out_rank, const std::vector& axes_vec) - : m_out_rank(out_rank), m_axes_vec(axes_vec), m_outputShape(out_rank, 1), m_needReverse(axes_vec.empty()) {} - - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - const VectorDims& shapeIn = input_shapes[0].get(); - if (m_needReverse) { - for (size_t i = 0; i < m_out_rank; ++i) { - m_outputShape[i] = shapeIn[m_out_rank - 1 - i]; - } - } else { - for (size_t i = 0; i < m_out_rank; ++i) { - m_outputShape[i] = shapeIn[m_axes_vec[i]]; - } - } - return {{m_outputShape}, ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return EMPTY_PORT_MASK; - } - -private: - const size_t m_out_rank; - const std::vector m_axes_vec; - VectorDims m_outputShape; - const bool m_needReverse; -}; - -class TransposeShapeInferFactory : public ShapeInferFactory { -public: - TransposeShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} - ShapeInferPtr makeShapeInfer() const override { 
-        if (const auto order = ov::as_type_ptr<ov::op::v0::Constant>(m_op->get_input_node_shared_ptr(ov::op::v1::Transpose::ORDER))) {
-            const auto axes_vec = order->cast_vector<size_t>();
-            return std::make_shared<TransposeShapeInfer>(m_op->get_output_partial_shape(0).rank().get_length(), axes_vec);
-        } else {
-            return std::make_shared<TransposeDynShapeInfer>();
-        }
-    }
-
-private:
-    const std::shared_ptr<ov::Node> m_op;
-};
-} // namespace
-
 Transpose::Transpose(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
         Node(op, context, TransposeShapeInferFactory(op)) {
     std::string errorMessage;
diff --git a/src/plugins/intel_cpu/src/nodes/unique.cpp b/src/plugins/intel_cpu/src/nodes/unique.cpp
index 5fbb3b4cebe..43a7dd062b2 100644
--- a/src/plugins/intel_cpu/src/nodes/unique.cpp
+++ b/src/plugins/intel_cpu/src/nodes/unique.cpp
@@ -7,7 +7,7 @@
 #include "ie_parallel.hpp"
 #include
 #include "common/cpu_memcpy.h"
-#include
+#include
 
 using namespace InferenceEngine;
 using namespace ov::intel_cpu;
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.cpp
new file mode 100644
index 00000000000..7bca800f589
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.cpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "adaptive_pooling.hpp"
+#include "utils.hpp"
+#include "ie_ngraph_utils.hpp"
+
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using namespace InferenceEngine;
+
+/**
+ * Implements Adaptive Pooling shape inference algorithm. The output tensor shape consists of the input [N, C] dimensions and
+ * the [D_out, H_out, W_out] dimensions, which are placed in the second input parameter.
+ *
+ */
+Result AdaptivePoolingShapeInfer::infer(
+    const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+    const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
+    const auto& inputDims = input_shapes[0].get();
+    const auto& spatialDims = input_shapes[1].get();
+    const auto inputRank = inputDims.size();
+    const auto spatialDimsSize = spatialDims[0];
+
+    VectorDims outputDims(inputRank);
+    outputDims[0] = inputDims[0];
+    outputDims[1] = inputDims[1];
+    auto newSpatialDimsPtr = reinterpret_cast<int32_t*>(data_dependency.at(1)->getData());
+    for (size_t i = 0; i < spatialDimsSize; i++) {
+        outputDims[i + 2] = newSpatialDimsPtr[i];
+    }
+
+    std::vector<VectorDims> result(m_outputs_count, outputDims);
+    return {std::move(result), ShapeInferStatus::success};
+}
+
+ShapeInferPtr AdaptivePoolingShapeInferFactory::makeShapeInfer() const {
+    size_t outputs_count = m_op->get_output_size();
+    return std::make_shared<AdaptivePoolingShapeInfer>(outputs_count);
+}
+
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
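Reviewer note (illustrative only, not part of the patch): the relocated algorithm boils down to the standalone sketch below. The helper name and plain STL types are hypothetical; the real implementation reads the target spatial dims from the MemoryPtr data dependency on port 1.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using VectorDims = std::vector<size_t>;

// N and C are copied from the data input; the output D/H/W are the *values* of the second input.
VectorDims adaptive_pool_out_shape(const VectorDims& input, const std::vector<int32_t>& spatial) {
    VectorDims out(input.size());
    out[0] = input[0];
    out[1] = input[1];
    for (size_t i = 0; i < spatial.size(); ++i)
        out[i + 2] = static_cast<size_t>(spatial[i]);
    return out;
}

int main() {
    // [1, 3, 8, 16, 16] pooled to target spatial [4, 8, 8] -> [1, 3, 4, 8, 8]
    assert((adaptive_pool_out_shape({1, 3, 8, 16, 16}, {4, 8, 8}) == VectorDims{1, 3, 4, 8, 8}));
    return 0;
}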
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.hpp
new file mode 100644
index 00000000000..8ee7c772cb0
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.hpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+#include "shape_inference/shape_inference_cpu.hpp"
+
+#pragma once
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using Result = IShapeInfer::Result;
+
+/**
+ * Implements Adaptive Pooling shape inference algorithm. The output tensor shape consists of the input [N, C] dimensions and
+ * the [D_out, H_out, W_out] dimensions, which are placed in the second input parameter.
+ *
+ */
+class AdaptivePoolingShapeInfer : public ShapeInferEmptyPads {
+public:
+    explicit AdaptivePoolingShapeInfer(size_t outputs_count) : m_outputs_count(outputs_count) {}
+    Result infer(
+        const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+        const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
+
+    port_mask_t get_port_mask() const override {
+        return PortMask(1);
+    }
+
+private:
+    size_t m_outputs_count;
+};
+
+class AdaptivePoolingShapeInferFactory : public ShapeInferFactory {
+public:
+    AdaptivePoolingShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
+    ShapeInferPtr makeShapeInfer() const override;
+
+private:
+    std::shared_ptr<ov::Node> m_op;
+};
+
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/color_convert.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/color_convert.cpp
new file mode 100644
index 00000000000..d46595508ff
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/color_convert.cpp
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "nodes/color_convert.h"
+#include "color_convert.hpp"
+#include "utils.hpp"
+#include "ie_ngraph_utils.hpp"
+
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using namespace InferenceEngine;
+/**
+ * Implements Color Convert shape inference algorithm. Depending on whether the input is a single plane, the H dimension
+ * is either passed through or recalculated as 2/3 of the initial size.
+ *
+ */
+Result ColorConvertShapeInfer::infer(const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+                                     const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
+    const auto& dims = input_shapes.front().get();
+    if (dims.size() != 4) {
+        OPENVINO_THROW("NV12Converter node has incorrect input dimensions");
+    }
+    return { m_singlePlain
+                 ? std::vector<VectorDims>{ { dims[ColorConvert::Converter::N_DIM], dims[ColorConvert::Converter::H_DIM] * 2 / 3, dims[ColorConvert::Converter::W_DIM], 3 } }
+                 : std::vector<VectorDims>{ { dims[ColorConvert::Converter::N_DIM], dims[ColorConvert::Converter::H_DIM], dims[ColorConvert::Converter::W_DIM], 3 } },
+             ShapeInferStatus::success };
+}
+
+ShapeInferPtr ColorConvertShapeInferFactory::makeShapeInfer() const {
+    bool isSinglePlain = m_op->get_input_size() == 1;
+    return std::make_shared<ColorConvertShapeInfer>(isSinglePlain);
+}
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
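Reviewer note (illustrative only, not part of the patch): a worked example of the rule above, with hypothetical names and assuming the node's NHWC layout. Single-plane NV12 stores Y (H rows) plus interleaved UV (H/2 rows) in one plane of height 3/2 * H, which is why the RGB output height is the stored height times 2/3.

#include <array>
#include <cstddef>

// Single-plane: stored height = 3/2 * image height, so output H = stored_H * 2 / 3.
// Two-plane: the first input already has the image height, so H passes through.
std::array<std::size_t, 4> color_convert_out_shape(const std::array<std::size_t, 4>& nhwc, bool single_plane) {
    const std::size_t N = nhwc[0], H = nhwc[1], W = nhwc[2];
    return single_plane ? std::array<std::size_t, 4>{N, H * 2 / 3, W, 3}
                        : std::array<std::size_t, 4>{N, H, W, 3};
}

// e.g. a 1x720x1280x1 single-plane NV12 input (stored height 720 = 480 * 3/2)
// maps to a 1x480x1280x3 RGB output.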
+ * + */ +class ColorConvertShapeInfer : public ShapeInferEmptyPads { +public: + ColorConvertShapeInfer(bool singlePlain) : m_singlePlain(singlePlain) {} + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } + +private: + bool m_singlePlain = false; +}; + +class ColorConvertShapeInferFactory : public ShapeInferFactory { +public: + ColorConvertShapeInferFactory(std::shared_ptr op) : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override; + +private: + std::shared_ptr m_op; +}; + +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/eltwise.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/eltwise.cpp new file mode 100644 index 00000000000..2a31f787c9c --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/eltwise.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "eltwise.hpp" +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" + +namespace ov { +namespace intel_cpu { +namespace node { +using namespace InferenceEngine; + +/** + * Implements Eltwise shape inference algorithm. The algorithm is based on broadcasting all the input shapes + * according to the NUMPY broadcast rule. This implementation is more lightweight than the ngraph one. + * + */ +Result EltwiseShapeInfer::infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + size_t max_rank = 0; + size_t max_rank_idx = 0; + for (size_t i = 0; i < input_shapes.size(); ++i) { + auto item_rank = input_shapes[i].get().size(); + if (item_rank > max_rank) { + max_rank = item_rank; + max_rank_idx = i; + } + } + auto output_shape = input_shapes[max_rank_idx].get(); + // use NUMPY broadcast rule + for (size_t i = 0; i < input_shapes.size(); i++) { + if (i == max_rank_idx) + continue; + + auto& input_shape = input_shapes[i].get(); + if (input_shape.size() > output_shape.size()) { + OPENVINO_THROW("Eltwise shape infer input and output shapes rank mismatch"); + } + size_t offset = output_shape.size() - input_shape.size(); + for (size_t j = 0; j < input_shape.size(); ++j) { + if (input_shape[j] != output_shape[offset + j]) { + if (output_shape[offset + j] == 1) { + output_shape[offset + j] = input_shape[j]; + } else { + if (input_shape[j] != 1) OPENVINO_THROW("Eltwise shape infer input shapes dim index: ", j, " mismatch"); + } + } + } + } + return { { std::move(output_shape) }, ShapeInferStatus::success }; +} + +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/eltwise.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/eltwise.hpp new file mode 100644 index 00000000000..0adbc91b94c --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/eltwise.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; + +/** + * Implements Eltwise shape inference algorithm. The algorithm is based on broadcasting all the input shapes + * according to the NUMPY broadcast rule. This implementation is more lightweight than the ngraph one. 
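+ *
+ * For example (illustrative shapes only): broadcasting inputs [5, 1, 8] and [4, 1] under the
+ * NUMPY rule aligns them from the right and yields the output shape [5, 4, 8].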
+ * + */ +class EltwiseShapeInfer : public ShapeInferEmptyPads { +public: + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } +}; + +class EltwiseShapeInferFactory : public ShapeInferFactory { +public: + ShapeInferPtr makeShapeInfer() const override { + return std::make_shared(); + } +}; +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.cpp new file mode 100644 index 00000000000..8b7b3e8d700 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fullyconnected.hpp" +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" + +namespace ov { +namespace intel_cpu { +namespace node { +using namespace InferenceEngine; +Result FCShapeInfer::infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + const VectorDims& activationShape = input_shapes[0].get(); + const VectorDims& weightShape = input_shapes[1].get(); + size_t activationRank = activationShape.size(); + size_t channelRank = weightShape.size() - 1; + + // activation weight output_shape + // NCHW CoCHW NCo + // TNC CoC TNCo + // NC CoC NCo + VectorDims outputShape(out_rank, 1); + // set Co + outputShape.back() = weightShape[0]; + // set batch dims + size_t batchRank = activationRank - channelRank; + size_t startIdx = out_rank - batchRank - 1; + for (size_t i = 0; i < batchRank; i++) { + outputShape[i + startIdx] = activationShape[i]; + } + + return {{std::move(outputShape)}, ShapeInferStatus::success}; +} +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.hpp new file mode 100644 index 00000000000..d950f6b8f7a --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.hpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; + +class FCShapeInfer : public ShapeInferEmptyPads { +public: + FCShapeInfer(size_t outPut_rank) : out_rank(outPut_rank) {} + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } + +private: + size_t out_rank = 0; +}; + +class FCShapeInferFactory : public ShapeInferFactory { +public: + FCShapeInferFactory(std::shared_ptr op) : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override { + return std::make_shared(m_op->get_output_partial_shape(0).rank().get_length()); + } + +private: + std::shared_ptr m_op; +}; +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/gather.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/gather.cpp new file mode 100644 index 00000000000..85aa76b29d5 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/gather.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include "gather.hpp" +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" + +namespace ov { +namespace intel_cpu { +namespace node { +using namespace InferenceEngine; +Result GatherShapeInfer::infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + static constexpr size_t GATHER_DATA = 0, GATHER_INDICES = 1, GATHER_AXIS = 2; + const auto& input_shape = input_shapes[GATHER_DATA].get(); + // Use VectorDims{} instead of {1} for Scalar + const auto& indices_shape = m_isIndicesScalar ? VectorDims{} : + input_shapes[GATHER_INDICES].get(); + if (!m_isAxisInputConst) { + if (data_dependency.at(GATHER_AXIS)->getDesc().getPrecision() != Precision::I32) { + OPENVINO_THROW("Unsupported precision ", data_dependency.at(GATHER_AXIS)->getDesc().getPrecision(), + " for axis tensor."); + } + m_axis = reinterpret_cast(data_dependency.at(GATHER_AXIS)->getData())[0]; + } + if (m_axis < 0) { + m_axis += input_shape.size(); + } + if (m_batchDims < 0) { + m_batchDims += indices_shape.size(); + } + VectorDims output_shape; + output_shape.reserve(input_shape.size() + indices_shape.size() - m_batchDims - 1); + output_shape.insert(output_shape.end(), input_shape.begin(), input_shape.begin() + m_axis); + output_shape.insert(output_shape.end(), indices_shape.begin() + m_batchDims, indices_shape.end()); + output_shape.insert(output_shape.end(), input_shape.begin() + m_axis + 1, input_shape.end()); + return {{std::move(output_shape)}, ShapeInferStatus::success}; +} + +ShapeInferPtr GatherShapeInferFactory::makeShapeInfer() const { + static constexpr size_t GATHER_INDICES = 1, GATHER_AXIS = 2; + bool isAxisInputConst = ov::is_type(m_op->get_input_node_ptr(GATHER_AXIS)); + const auto& indicesShape = m_op->get_input_partial_shape(GATHER_INDICES); + if (!indicesShape.rank().is_static()) { + OPENVINO_THROW("indicesShape do not support dynamic rank."); + } + bool isIndicesScalar = indicesShape.rank().get_length() == 0; + int axis = isAxisInputConst ? ov::as_type(m_op->get_input_node_ptr( + GATHER_AXIS))->cast_vector()[0] : 0; + int batchDims = ov::is_type(m_op) ? static_cast(ov::as_type_ptr + (m_op)->get_batch_dims()) : ( + ov::is_type(m_op) ? 
static_cast(ov::as_type_ptr + (m_op)->get_batch_dims()) : 0); + return std::make_shared(isAxisInputConst, isIndicesScalar, axis, batchDims); +} + +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/gather.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/gather.hpp new file mode 100644 index 00000000000..5782dfb5b0b --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/gather.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; + +class GatherShapeInfer : public ShapeInferEmptyPads { +public: + GatherShapeInfer(bool isAxisInputConst, bool isIndicesScalar, int axis, int batchDims) : m_isAxisInputConst(isAxisInputConst), + m_isIndicesScalar(isIndicesScalar), m_axis(axis), m_batchDims(batchDims) {} + + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + port_mask_t get_port_mask() const override { + return PortMask(2); + } + +private: + bool m_isAxisInputConst = false; + bool m_isIndicesScalar = false; + int m_axis = 0; + int m_batchDims = 0; +}; + +class GatherShapeInferFactory : public ShapeInferFactory { +public: + GatherShapeInferFactory(std::shared_ptr op) : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override; + +private: + std::shared_ptr m_op; +}; +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp new file mode 100644 index 00000000000..1484e35b179 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp @@ -0,0 +1,64 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "matmul.hpp" +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" +#include + +namespace ov { +namespace intel_cpu { +namespace node { +using namespace InferenceEngine; +Result MMShapeInfer::infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + const VectorDims& shapeA = input_shapes[0].get(); + const VectorDims& shapeB = input_shapes[1].get(); + const size_t rankA = shapeA.size(); + const size_t rankB = shapeB.size(); + + // getSupportedDescriptors has done some shape check. + // 1. Needn't assert the scalar type since the matmul_shape_inference has checked. + // 2. Needn't check the compatibility of the last two dims + // 3. 1-D x 1-D is needed + // 4. transpose is necessary + // 5. Just support the same rank of matmul + // 6. simplify the broadcast check + if (rankA == 1 && rankB == 1 && shapeA[0] == shapeB[0]) { + return {{m_shapeY}, ShapeInferStatus::success}; + } + + m_shapeY[m_out_rank-2] = m_transpose_a ? shapeA[rankA-1] : shapeA[rankA-2]; + m_shapeY[m_out_rank-1] = m_transpose_b ? shapeB[rankB-2] : shapeB[rankB-1]; + + for (size_t i=0; i < m_out_rank-2; ++i) { + if (shapeA[i] != shapeB[i]) { + if (shapeB[i] == 1) { + m_shapeY[i] = shapeA[i]; + continue; + } else if (shapeA[i] != 1) { + OPENVINO_THROW("Incompatible MatMul batch dimension. 
Can't merge the first input dimension=",
+                           shapeA[i], " with second input dimension=", shapeB[i], " at index=", i);
+            }
+        }
+        m_shapeY[i] = shapeB[i];
+    }
+
+    return {{m_shapeY}, ShapeInferStatus::success};
+}
+
+ShapeInferPtr MMShapeInferFactory::makeShapeInfer() const {
+    if (const auto matmul = ov::as_type_ptr<const ov::op::v0::MatMul>(m_op)) {
+        const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length();
+        const bool transpose_a = matmul->get_transpose_a();
+        const bool transpose_b = matmul->get_transpose_b();
+        return std::make_shared<MMShapeInfer>(output_rank, transpose_a, transpose_b);
+    } else {
+        OPENVINO_THROW("Unexpected operation type in the MatMul shape inference factory");
+    }
+}
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp
new file mode 100644
index 00000000000..dd38d984d08
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp
@@ -0,0 +1,44 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include 
+#include "shape_inference/shape_inference_cpu.hpp"
+
+#pragma once
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using Result = IShapeInfer::Result;
+class MMShapeInfer : public ShapeInferEmptyPads {
+public:
+    MMShapeInfer(const size_t& out_rank, const bool& transpose_a, const bool& transpose_b) :
+        m_out_rank(out_rank), m_transpose_a(transpose_a), m_transpose_b(transpose_b) {
+        m_shapeY = VectorDims(m_out_rank, 1); // for output and cache
+    }
+    Result infer(
+        const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+        const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
+
+    port_mask_t get_port_mask() const override {
+        return EMPTY_PORT_MASK;
+    }
+
+private:
+    VectorDims m_shapeY;
+    const size_t m_out_rank;
+    const bool m_transpose_a;
+    const bool m_transpose_b;
+};
+
+class MMShapeInferFactory : public ShapeInferFactory {
+public:
+    MMShapeInferFactory(const std::shared_ptr<ov::Node>& op) : m_op(op) {}
+    ShapeInferPtr makeShapeInfer() const override;
+private:
+    std::shared_ptr<ov::Node> m_op;
+};
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/ngram.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/ngram.cpp
new file mode 100644
index 00000000000..89af2df2759
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/ngram.cpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/cpu_opset/common/op/ngram.hpp"
+#include "ngram.hpp"
+#include "utils.hpp"
+#include "ie_ngraph_utils.hpp"
+
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using namespace InferenceEngine;
+Result NgramShapeInfer::infer(
+    const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+    const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
+    auto output_shape = input_shapes[0].get();
+    output_shape[1] *= m_k;
+    return {{std::move(output_shape)}, ShapeInferStatus::success};
+}
+
+ShapeInferPtr NgramShapeInferFactory::makeShapeInfer() const {
+    auto ngram = ov::as_type_ptr<NgramNode>(m_op);
+    if (!ngram) {
+        OPENVINO_THROW("Wrong operation type");
+    }
+    return std::make_shared<NgramShapeInfer>(ngram->get_k());
+}
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/ngram.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/ngram.hpp
new file mode 100644
index 00000000000..d3b7fd23da3
--- /dev/null
+++
b/src/plugins/intel_cpu/src/shape_inference/custom/ngram.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; +class NgramShapeInfer : public ShapeInferEmptyPads { +public: + NgramShapeInfer(const size_t k) : m_k(k) {} + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } + +private: + size_t m_k; +}; + +class NgramShapeInferFactory : public ShapeInferFactory { +public: + NgramShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override; + +private: + std::shared_ptr m_op; +}; +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp new file mode 100644 index 00000000000..fa7352dd344 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "one_hot.hpp" +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" +#include + +namespace ov { +namespace intel_cpu { +namespace node { +using namespace InferenceEngine; + +/** + * Implements One Hot shape inference algorithm. The output shape is the input `indices` tensor shape, where a new axis + * of size `depth` is inserted at the dimension defined by the `axis` parameter. + * + */ +Result OneHotShapeInfer::infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + auto depth = reinterpret_cast(data_dependency.at(1)->getData())[0]; + + auto result = input_shapes.front().get(); + result.insert(result.begin() + m_axis, depth); + + return {{std::move(result)}, ShapeInferStatus::success}; +} + +ShapeInferPtr OneHotShapeInferFactory::makeShapeInfer() const { + auto oneHot = ov::as_type_ptr(m_op); + if (!oneHot) { + OPENVINO_THROW("Unexpected op type in OneHot shape inference factory: ", m_op->get_type_name()); + } + auto axis = oneHot->get_axis(); + auto dstShape = oneHot->get_output_partial_shape(0); + int output_dims_size = dstShape.size(); + if (0 == output_dims_size) output_dims_size = 1; + if (axis < 0) { + axis += output_dims_size; + } + return std::make_shared(axis); +} +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.hpp new file mode 100644 index 00000000000..0cf348af5e3 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.hpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; +/** + * Implements One Hot shape inference algorithm. The output shape is the input `indices` tensor shape, where a new axis + * of size `depth` is inserted at the dimension defined by the `axis` parameter. 
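+ *
+ * For example (illustrative values only): indices of shape [3] with depth = 10 and axis = 1
+ * produce the output shape [3, 10]; with axis = 0 the result would be [10, 3].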
+ *
+ */
+class OneHotShapeInfer : public ShapeInferEmptyPads {
+public:
+    explicit OneHotShapeInfer(int64_t axis) : m_axis(axis) {}
+    Result infer(
+        const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+        const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
+
+    port_mask_t get_port_mask() const override {
+        return PortMask(1);
+    }
+
+private:
+    int64_t m_axis = 0;
+};
+
+class OneHotShapeInferFactory : public ShapeInferFactory {
+public:
+    OneHotShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
+    ShapeInferPtr makeShapeInfer() const override;
+
+private:
+    std::shared_ptr<ov::Node> m_op;
+};
+
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.cpp
new file mode 100644
index 00000000000..9edf6ec8281
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.cpp
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "priorbox.hpp"
+#include "utils.hpp"
+#include "ie_ngraph_utils.hpp"
+#include 
+
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using namespace InferenceEngine;
+/**
+ * Implements Prior Box shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
+ * `number_of_priors` is an attribute of the operation. `height` and `width` are taken from the first input parameter.
+ *
+ */
+Result PriorBoxShapeInfer::infer(
+    const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+    const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
+    const int* in_data = reinterpret_cast<const int*>(data_dependency.at(0)->getData());
+    const int H = in_data[0];
+    const int W = in_data[1];
+    const auto output = static_cast<size_t>(4 * H * W * m_number_of_priors);
+    return {{{2, output}}, ShapeInferStatus::success};
+}
+
+ShapeInferPtr PriorBoxShapeInferFactory::makeShapeInfer() const {
+    auto priorBox = ov::as_type_ptr<const ngraph::opset1::PriorBox>(m_op);
+    if (!priorBox) {
+        OPENVINO_THROW("Unexpected op type in PriorBox shape inference factory: ", m_op->get_type_name());
+    }
+    const auto& attrs = priorBox->get_attrs();
+    auto number_of_priors = ngraph::opset1::PriorBox::number_of_priors(attrs);
+    return std::make_shared<PriorBoxShapeInfer>(number_of_priors);
+}
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.hpp
new file mode 100644
index 00000000000..6d8fb7a25bb
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.hpp
@@ -0,0 +1,45 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include 
+#include "shape_inference/shape_inference_cpu.hpp"
+
+#pragma once
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using Result = IShapeInfer::Result;
+
+/**
+ * Implements Prior Box shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
+ * `number_of_priors` is an attribute of the operation. `height` and `width` are taken from the first input parameter.
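+ *
+ * For example (illustrative values only): height = 10, width = 10 and number_of_priors = 4
+ * give an output shape of [2, 4 * 10 * 10 * 4] = [2, 1600].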
+ *
+ */
+class PriorBoxShapeInfer : public ShapeInferEmptyPads {
+public:
+    explicit PriorBoxShapeInfer(int64_t number_of_priors) : m_number_of_priors(number_of_priors) {}
+    Result infer(
+        const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+        const std::unordered_map<size_t, MemoryPtr>& data_dependency) override;
+
+    port_mask_t get_port_mask() const override {
+        return PortMask(0);
+    }
+
+private:
+    int64_t m_number_of_priors = 0;
+};
+
+class PriorBoxShapeInferFactory : public ShapeInferFactory {
+public:
+    explicit PriorBoxShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
+    ShapeInferPtr makeShapeInfer() const override;
+
+private:
+    std::shared_ptr<ov::Node> m_op;
+};
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.cpp
new file mode 100644
index 00000000000..3232a9f2e5c
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.cpp
@@ -0,0 +1,42 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "priorbox_clustered.hpp"
+#include "utils.hpp"
+#include "ie_ngraph_utils.hpp"
+#include 
+
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using namespace InferenceEngine;
+
+/**
+ * Implements Prior Box Clustered shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
+ * `number_of_priors` is an attribute of the operation. `height` and `width` are taken from the first input parameter.
+ *
+ */
+Result PriorBoxClusteredShapeInfer::infer(
+    const std::vector<std::reference_wrapper<const VectorDims>>& input_shapes,
+    const std::unordered_map<size_t, MemoryPtr>& data_dependency) {
+    const int* in_data = reinterpret_cast<const int*>(data_dependency.at(0)->getData());
+    const int H = in_data[0];
+    const int W = in_data[1];
+    const auto output = static_cast<size_t>(4 * H * W * m_number_of_priors);
+    return {{{2, output}}, ShapeInferStatus::success};
+}
+
+ShapeInferPtr PriorBoxClusteredShapeInferFactory::makeShapeInfer() const {
+    auto priorBox = ov::as_type_ptr<const ngraph::opset1::PriorBoxClustered>(m_op);
+    if (!priorBox) {
+        OPENVINO_THROW("Unexpected op type in PriorBoxClustered shape inference factory: ", m_op->get_type_name());
+    }
+    const auto& attrs = priorBox->get_attrs();
+    auto number_of_priors = attrs.widths.size();
+    return std::make_shared<PriorBoxClusteredShapeInfer>(number_of_priors);
+}
+
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.hpp
new file mode 100644
index 00000000000..fec8f9e20cf
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.hpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include 
+#include "shape_inference/shape_inference_cpu.hpp"
+
+#pragma once
+namespace ov {
+namespace intel_cpu {
+namespace node {
+using Result = IShapeInfer::Result;
+
+/**
+ * Implements Prior Box Clustered shape inference algorithm. The output shape is [2, 4 * height * width * number_of_priors].
+ * `number_of_priors` is an attribute of the operation. `height` and `width` are taken from the first input parameter.
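+ *
+ * For example (illustrative values only): height = 19, width = 19 and three clustered widths
+ * (number_of_priors = 3) give an output shape of [2, 4 * 19 * 19 * 3] = [2, 4332].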
+ * + */ +class PriorBoxClusteredShapeInfer : public ShapeInferEmptyPads { +public: + explicit PriorBoxClusteredShapeInfer(size_t number_of_priors) : m_number_of_priors(number_of_priors) {} + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + + port_mask_t get_port_mask() const override { + return PortMask(0); + } + +private: + size_t m_number_of_priors = 0; +}; + +class PriorBoxClusteredShapeInferFactory : public ShapeInferFactory { +public: + explicit PriorBoxClusteredShapeInferFactory(std::shared_ptr op) : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override; + +private: + std::shared_ptr m_op; +}; + +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/reshape.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.cpp new file mode 100644 index 00000000000..d5c535b03cf --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.cpp @@ -0,0 +1,193 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reshape.hpp" +#include +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" +#include + +namespace ov { +namespace intel_cpu { +namespace node { + +Result ReshapeShapeInfer::infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + static constexpr size_t RESHAPE_SRC = 0, RESHAPE_PATTERN = 1; + const auto& inputShape = input_shapes[RESHAPE_SRC].get(); + const size_t inputShapeSize = inputShape.size(); + const auto memPtr = data_dependency.at(RESHAPE_PATTERN); + const auto data = memPtr->getData(); + const auto& dims = memPtr->getStaticDims(); + const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); + std::vector outPattern = ov::get_raw_data_as( + InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()), + data, + outputPatternSize, + ov::util::Cast()); + VectorDims outputShape(outputPatternSize); + size_t outputProduct = 1; + int32_t minusOneIdx = -1; + int32_t minusOneCount = 0; + for (int32_t i = 0; i < outputPatternSize; ++i) { + if (outPattern[i] == 0 && m_specialZero && i < static_cast(inputShapeSize)) { + outputShape[i] = inputShape[i]; + } else if (outPattern[i] == -1) { + minusOneIdx = i; + minusOneCount++; + } else { + outputShape[i] = outPattern[i]; + outputProduct *= outputShape[i]; + } + } + size_t inputProduct = 1; + for (size_t i = 0; i < inputShapeSize; ++i) { + if (static_cast(i) < outputPatternSize && outPattern[i] == 0 && m_specialZero) { + continue; + } + inputProduct *= inputShape[i]; + } + if (minusOneIdx >= 0) { + if (outputProduct != 0) { + outputShape[minusOneIdx] = inputProduct / outputProduct; + outputProduct *= outputShape[minusOneIdx]; + } else { + outputShape[minusOneIdx] = 0; + } + } + if (minusOneCount > 1 || inputProduct != outputProduct) { + OPENVINO_THROW("[cpu]reshape: the shape of input data ", ov::intel_cpu::vec2str(inputShape), + " conflicts with the reshape pattern ", ov::intel_cpu::vec2str(outPattern)); + } + return {{std::move(outputShape)}, ShapeInferStatus::success}; +} + +Result SqueezeShapeInfer::infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + static constexpr size_t SQUEEZE_SRC = 0, SQUEEZE_PATTERN = 1; + const auto& inputShape = input_shapes[SQUEEZE_SRC].get(); + const size_t inputShapeSize = inputShape.size(); + auto itr = data_dependency.find(SQUEEZE_PATTERN); + VectorDims outputShape; + 
outputShape.reserve(inputShapeSize); + if (itr != data_dependency.end()) { + const auto memPtr = data_dependency.at(SQUEEZE_PATTERN); + const auto data = memPtr->getData(); + const auto& dims = memPtr->getStaticDims(); + if (dims.size() != 0) { + const size_t outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); + std::vector outPattern = ov::get_raw_data_as( + InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()), + data, + outputPatternSize, + ov::util::Cast()); + std::vector originOutPattern = outPattern; + std::vector removeMask(inputShapeSize, false); + bool existError = false; + for (size_t i = 0; i < outputPatternSize; i++) { + if (outPattern[i] < 0) { + outPattern[i] = inputShapeSize + outPattern[i]; + } + if (outPattern[i] >= 0 && outPattern[i] < static_cast(inputShapeSize)) { + removeMask[outPattern[i]] = true; + } else { + existError = true; + break; + } + } + for (size_t i = 0; i < inputShapeSize; i++) { + if (!removeMask[i]) { + outputShape.push_back(inputShape[i]); + } else if (inputShape[i] != 1) { + existError = true; + break; + } + } + if (existError) { + OPENVINO_THROW("[cpu]squeeze: the shape of input data ", ov::intel_cpu::vec2str(inputShape), + " conflicts with the squeeze pattern ", ov::intel_cpu::vec2str(originOutPattern)); + } + } else { + for (size_t i = 0; i < inputShapeSize; i++) { + if (inputShape[i] != 1) { + outputShape.push_back(inputShape[i]); + } + } + } + } else { + for (size_t i = 0; i < inputShapeSize; i++) { + if (inputShape[i] != 1) { + outputShape.push_back(inputShape[i]); + } + } + } + return {{std::move(outputShape)}, ShapeInferStatus::success}; +} + +Result UnsqueezeShapeInfer::infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + static constexpr size_t UNSQUEEZE_SRC = 0, UNSQUEEZE_PATTERN = 1; + const auto& inputShape = input_shapes[UNSQUEEZE_SRC].get(); + const size_t inputShapeSize = inputShape.size(); + const auto memPtr = data_dependency.at(UNSQUEEZE_PATTERN); + const auto data = memPtr->getData(); + const auto& dims = memPtr->getStaticDims(); + size_t outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); + std::vector originOutPattern = ov::get_raw_data_as( + InferenceEngine::details::convertPrecision(memPtr->getDesc().getPrecision()), + data, + outputPatternSize, + ov::util::Cast()); + // remove repeated pattern + std::unordered_set tmp(originOutPattern.begin(), originOutPattern.end()); + std::vector outPattern = std::vector(tmp.begin(), tmp.end()); + outputPatternSize = outPattern.size(); + size_t outputShapeSize = inputShapeSize + outputPatternSize; + VectorDims outputShape(outputShapeSize, 0); + bool existError = false; + for (size_t i = 0; i < outputPatternSize; i++) { + if (outPattern[i] < 0) { + outPattern[i] = outputShapeSize + outPattern[i]; + } + if (outPattern[i] >= 0 && outPattern[i] < static_cast(outputShapeSize)) { + outputShape[outPattern[i]] = 1; + } else { + existError = true; + break; + } + } + for (size_t i = 0, y = 0; i < outputShapeSize; i++) { + if (outputShape[i] == 0) { + if (y < inputShapeSize) { + outputShape[i] = inputShape[y]; + y++; + } else { + existError = true; + break; + } + } + } + if (existError) { + OPENVINO_THROW("[cpu]unsqueeze: the shape of input data ", ov::intel_cpu::vec2str(inputShape), + " conflicts with the unsqueeze pattern ", ov::intel_cpu::vec2str(originOutPattern)); + } + return {{std::move(outputShape)}, ShapeInferStatus::success}; +} + +ShapeInferPtr 
ReshapeShapeInferFactory::makeShapeInfer() const { + if (const auto reshapeOp = ov::as_type_ptr(m_op)) { + return std::make_shared(reshapeOp->get_special_zero()); + } else if (ov::is_type(m_op)) { + return std::make_shared(); + } else if (ov::is_type(m_op)) { + return std::make_shared(); + } else { + OPENVINO_THROW("[cpu]reshape: ", m_op->get_type_name(), " is not implemented"); + } +} + +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/reshape.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.hpp new file mode 100644 index 00000000000..c9ba51870cb --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.hpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; +class ReshapeShapeInfer : public ShapeInferEmptyPads { +public: + ReshapeShapeInfer(bool specialZero) : m_specialZero(specialZero) {} + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + port_mask_t get_port_mask() const override { + return PortMask(1); + } + +private: + bool m_specialZero; +}; + +class SqueezeShapeInfer : public ShapeInferEmptyPads { +public: + SqueezeShapeInfer() {} + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + port_mask_t get_port_mask() const override { + return PortMask(1); + } +}; + +class UnsqueezeShapeInfer : public ShapeInferEmptyPads { +public: + UnsqueezeShapeInfer() {} + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + port_mask_t get_port_mask() const override { + return PortMask(1); + } +}; + +class ReshapeShapeInferFactory : public ShapeInferFactory { +public: + ReshapeShapeInferFactory(std::shared_ptr op) : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override; + +private: + std::shared_ptr m_op; +}; +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/shapeof.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/shapeof.hpp new file mode 100644 index 00000000000..a311ff5a94d --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/shapeof.hpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; + +/** + * Implements Shape Of shape inference algorithm. The output shape is simply a 1D tensor with the size of the input tensor + * rank. 
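+ *
+ * For example (illustrative values only): an input of shape [8, 3, 224, 224] has rank 4,
+ * so the inferred output shape is [4].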
+ * + */ +class ShapeOfShapeInfer : public ShapeInferEmptyPads { +public: + ShapeOfShapeInfer() = default; + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + IE_ASSERT(!input_shapes.empty()); + return {{VectorDims{input_shapes.front().get().size()}}, ShapeInferStatus::success}; + } + + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } +}; + +class ShapeOfShapeInferFactory : public ShapeInferFactory { +public: + ShapeInferPtr makeShapeInfer() const override { + return std::make_shared(); + } +}; + +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp new file mode 100644 index 00000000000..19918455ae6 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp @@ -0,0 +1,94 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "strided_slice.hpp" +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" +#include "slice_shape_inference.hpp" +#include + +namespace ov { +namespace intel_cpu { +namespace node { +using namespace InferenceEngine; +StridedSliceShapeInfer::StridedSliceShapeInfer(size_t output_size, + std::unordered_set begin_mask, + std::unordered_set end_mask, + std::unordered_set new_axis_mask, + std::unordered_set shrink_axis_mask) + : m_outputShape(output_size, 1), + m_begin_mask_set(std::move(begin_mask)), + m_end_mask_set(std::move(end_mask)), + m_new_axis_mask_set(std::move(new_axis_mask)), + m_shrink_axis_mask_set(std::move(shrink_axis_mask)) {} + +Result StridedSliceShapeInfer::infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + // align with intel_cpu::node::StridedSlice + static constexpr size_t DATA_ID = 0, BEGIN_ID = 1, END_ID = 2, STRIDE_ID = 3; + const VectorDims& shapeIn = input_shapes[DATA_ID].get(); + const VectorDims& shapeBegin = input_shapes[BEGIN_ID].get(); + if (data_dependency.at(BEGIN_ID)->getDesc().getPrecision() != Precision::I32 || + data_dependency.at(END_ID)->getDesc().getPrecision() != Precision::I32 || + data_dependency.at(STRIDE_ID)->getDesc().getPrecision() != Precision::I32) { + OPENVINO_THROW("The data type of begin/end/stride is NOT I32, which is unexpected!"); + } + auto beginPtr = reinterpret_cast(data_dependency.at(BEGIN_ID)->getData()); + auto endPtr = reinterpret_cast(data_dependency.at(END_ID)->getData()); + auto stridePtr = reinterpret_cast(data_dependency.at(STRIDE_ID)->getData()); + + for (size_t i = 0, new_idx = 0; i < shapeIn.size(); ++i) { + if (m_new_axis_mask_set.count(i)) { + // deal with new_axis_mask + m_outputShape[new_idx] = 1; + m_outputShape[new_idx+1] = shapeIn[i]; + new_idx+=2; + } else if (!m_shrink_axis_mask_set.count(i)) { + // deal with begin_mask and end_mask + if ((i >= shapeBegin[0]) || (shapeIn[i] == 0)) { + m_outputShape[new_idx] = shapeIn[i]; + } else { + auto begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i]; + auto end = m_end_mask_set.count(i) ? 
shapeIn[i] : endPtr[i]; + m_outputShape[new_idx] = ov::op::slice::get_sliced_value(shapeIn[i], begin, end, stridePtr[i]); + } + new_idx += 1; + } + } + return {{m_outputShape}, ShapeInferStatus::success}; +} + +ShapeInferPtr StridedSliceShapeInferFactory::makeShapeInfer() const { + if (const auto Slice_op = ov::as_type_ptr(m_op)) { + return std::make_shared(make_shape_inference(m_op), port_mask); + } else if (const auto StridedSlice_op = ov::as_type_ptr(m_op)) { + const auto& ellipsis_mask = StridedSlice_op->get_ellipsis_mask(); + if (std::any_of(ellipsis_mask.begin(), ellipsis_mask.end(), [](int64_t x){ return x == 1; })) { + return std::make_shared(make_shape_inference(m_op), port_mask); + } else { + auto vec_to_set = [](const std::vector& vec){ + std::unordered_set to_set; + for (size_t i = 0; i < vec.size(); ++i) { + if (vec[i] == 1) { + to_set.emplace(i); + } + } + return to_set; + }; + return std::make_shared( + m_op->get_output_partial_shape(0).rank().get_length(), + vec_to_set(StridedSlice_op->get_begin_mask()), + vec_to_set(StridedSlice_op->get_end_mask()), + vec_to_set(StridedSlice_op->get_new_axis_mask()), + vec_to_set(StridedSlice_op->get_shrink_axis_mask())); + } + } else { + OPENVINO_THROW("not Slice or StridedSlice"); + } +} + +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.hpp new file mode 100644 index 00000000000..13df331b51e --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; + +constexpr IShapeInfer::port_mask_t port_mask = PortMask(/*BEGIN_ID*/1, /*END_ID*/2, /*STRIDE_ID*/3, /*AXES_ID*/4); + +class StridedSliceShapeInfer : public ShapeInferEmptyPads { +public: + StridedSliceShapeInfer(size_t output_size, + std::unordered_set begin_mask, + std::unordered_set end_mask, + std::unordered_set new_axis_mask, + std::unordered_set shrink_axis_mask); + + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + + port_mask_t get_port_mask() const override { + return port_mask; + } + +private: + VectorDims m_outputShape; + const std::unordered_set m_begin_mask_set; + const std::unordered_set m_end_mask_set; + const std::unordered_set m_new_axis_mask_set; + const std::unordered_set m_shrink_axis_mask_set; +}; + +class StridedSliceShapeInferFactory : public ShapeInferFactory { +public: + StridedSliceShapeInferFactory(const std::shared_ptr& op) + : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override; + +private: + const std::shared_ptr m_op; +}; + +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/subgraph.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/subgraph.hpp new file mode 100644 index 00000000000..ef750e0f9a9 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/subgraph.hpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = 
IShapeInfer::Result; +/* This class implementation is a temporal WA + TODO: revise the implementation to remove the node reference*/ +class SnippetShapeInfer : public ShapeInferEmptyPads { +public: + SnippetShapeInfer(Snippet* node) : m_node(node) {} + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + return {m_node->shapeInfer(), ShapeInferStatus::success}; + } + + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } + +private: + Snippet* m_node; +}; + +class SnippetShapeInferFactory : public ShapeInferFactory { +public: + SnippetShapeInferFactory(Snippet* node) : m_node(node) {} + ShapeInferPtr makeShapeInfer() const override { + return std::make_shared(m_node); + } + +private: + Snippet* m_node; +}; +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/transpose.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/transpose.cpp new file mode 100644 index 00000000000..41f96cb684a --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/transpose.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transpose.hpp" +#include "utils.hpp" +#include "ie_ngraph_utils.hpp" + +namespace ov { +namespace intel_cpu { +namespace node { +using namespace InferenceEngine; +TransposeShapeInfer::TransposeShapeInfer(const size_t& out_rank, const std::vector& axes_vec) + : m_out_rank(out_rank), m_axes_vec(axes_vec), m_outputShape(out_rank, 1), m_needReverse(axes_vec.empty()) {} + +Result TransposeShapeInfer::infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + const VectorDims& shapeIn = input_shapes[0].get(); + if (m_needReverse) { + for (size_t i = 0; i < m_out_rank; ++i) { + m_outputShape[i] = shapeIn[m_out_rank - 1 - i]; + } + } else { + for (size_t i = 0; i < m_out_rank; ++i) { + m_outputShape[i] = shapeIn[m_axes_vec[i]]; + } + } + return {{m_outputShape}, ShapeInferStatus::success}; +} +ShapeInferPtr TransposeShapeInferFactory::makeShapeInfer() const { + if (const auto order = ov::as_type_ptr(m_op->get_input_node_shared_ptr(ov::op::v1::Transpose::ORDER))) { + const auto axes_vec = order->cast_vector(); + return std::make_shared(m_op->get_output_partial_shape(0).rank().get_length(), axes_vec); + } else { + return std::make_shared(); + } +} +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/transpose.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/transpose.hpp new file mode 100644 index 00000000000..d1d000ab869 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/transpose.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shape_inference/shape_inference_cpu.hpp" + +#pragma once +namespace ov { +namespace intel_cpu { +namespace node { +using Result = IShapeInfer::Result; + +class TransposeDynShapeInfer : public ShapeInferEmptyPads { +public: + TransposeDynShapeInfer() = default; + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + OPENVINO_THROW("TODO: Support parameterized Order input for dynamic shapes."); + } + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } +private: +}; + +class TransposeShapeInfer : public ShapeInferEmptyPads { +public: + 
TransposeShapeInfer(const size_t& out_rank, const std::vector& axes_vec); + + Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + + port_mask_t get_port_mask() const override { + return EMPTY_PORT_MASK; + } + +private: + const size_t m_out_rank; + const std::vector m_axes_vec; + VectorDims m_outputShape; + const bool m_needReverse; +}; + +class TransposeShapeInferFactory : public ShapeInferFactory { +public: + TransposeShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} + ShapeInferPtr makeShapeInfer() const override; + +private: + const std::shared_ptr m_op; +}; + +} // namespace node +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.hpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_cpu.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.cpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_cpu.cpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.cpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_cpu.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_cpu.hpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.hpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_internal_dyn.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_internal_dyn.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_internal_dyn.hpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference_internal_dyn.hpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_ngraph.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.cpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_ngraph.cpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.cpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_ngraph.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_ngraph.hpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.hpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_pass_through.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_pass_through.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_pass_through.hpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference_pass_through.hpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_status.hpp 
b/src/plugins/intel_cpu/src/shape_inference/shape_inference_status.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/shape_inference_status.hpp rename to src/plugins/intel_cpu/src/shape_inference/shape_inference_status.hpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.cpp b/src/plugins/intel_cpu/src/shape_inference/static_dimension.cpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.cpp rename to src/plugins/intel_cpu/src/shape_inference/static_dimension.cpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.hpp b/src/plugins/intel_cpu/src/shape_inference/static_dimension.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.hpp rename to src/plugins/intel_cpu/src/shape_inference/static_dimension.hpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/static_shape.cpp b/src/plugins/intel_cpu/src/shape_inference/static_shape.cpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/static_shape.cpp rename to src/plugins/intel_cpu/src/shape_inference/static_shape.cpp diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/static_shape.hpp b/src/plugins/intel_cpu/src/shape_inference/static_shape.hpp similarity index 100% rename from src/plugins/intel_cpu/src/utils/shape_inference/static_shape.hpp rename to src/plugins/intel_cpu/src/shape_inference/static_shape.hpp diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp index 942bb81e0ed..3781e59bc78 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/ngram.cpp @@ -27,7 +27,7 @@ void ov::intel_cpu::NgramNode::validate_and_infer_types() { NGRAPH_CHECK(m_k > 0, "k attribute must be greater than zero"); const auto& idces_et = get_input_element_type(1); - const auto& idces_shape = get_input_partial_shape(0); + const auto& idces_shape = get_input_partial_shape(1); NGRAPH_CHECK(idces_shape.rank() == 2, "'batch_idces' input must have 2D shape whereas current shape is", idces_shape); NGRAPH_CHECK(idces_et.is_integral_number(), "'batch_idces' input must be integer whereas current element type is", idces_et); diff --git a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt index 2fc24079fe6..9299fe7b92b 100644 --- a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt @@ -81,6 +81,21 @@ ie_faster_build(${TARGET_NAME} UNITY ) +function(group_source_file GROUP_NAME GROUP_DIR) + file(GLOB GROUP_FILES ${GROUP_DIR}/*.cpp) + foreach(file ${GROUP_FILES}) + set_source_files_properties(${file} PROPERTIES UNITY_GROUP ${GROUP_NAME}) + endforeach() +endfunction() + +if(ENABLE_FASTER_BUILD) + set_target_properties(${TARGET_NAME} PROPERTIES UNITY_BUILD ON UNITY_BUILD_MODE GROUP) + group_source_file(unit_src_nodes ${CMAKE_CURRENT_SOURCE_DIR}/nodes) + group_source_file(unit_src_snippets_transformations ${CMAKE_CURRENT_SOURCE_DIR}/snippets_transformations) + group_source_file(unit_src_ngrah_transformation ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations) + group_source_file(unit_src_custom_shape_infer ${CMAKE_CURRENT_SOURCE_DIR}/shape_inference_test/custom_shape_infer) +endif() + target_include_directories(${TARGET_NAME} SYSTEM PRIVATE 
$/src/common $/src/cpu diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_elementwise_arithmetic.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_elementwise_arithmetic.cpp index a3e7abb0afb..6a174e392fd 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_elementwise_arithmetic.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_elementwise_arithmetic.cpp @@ -7,8 +7,8 @@ #include "utils.hpp" #include "openvino/op/ops.hpp" #include "openvino/op/parameter.hpp" -#include "utils/shape_inference/shape_inference.hpp" -#include "utils/shape_inference/static_shape.hpp" +#include "shape_inference/shape_inference.hpp" +#include "shape_inference/static_shape.hpp" using namespace ov; using namespace ov::intel_cpu; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp index 4df0e02583a..444a1f7dec6 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp @@ -7,7 +7,7 @@ #include "openvino/op/parameter.hpp" #include "openvino/pass/graph_rewrite.hpp" #include "utils.hpp" -#include "utils/shape_inference/static_shape.hpp" +#include "shape_inference/static_shape.hpp" using namespace ov; using namespace ov::intel_cpu; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/adaptive_avg_pool.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/adaptive_avg_pool.cpp new file mode 100644 index 00000000000..76c5ab9cf58 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/adaptive_avg_pool.cpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "common_test_utils/test_assertions.hpp" +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +using AdaptiveAvgPoolV8TestParams = std::tuple, // output_shapes + StaticShape // Expected shape + >; + +class AdaptiveAvgPoolV8CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest, + public WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + unit_test::ShapeVector tmp_input_shapes; + std::vector tmp_axes; + StaticShape tmp_exp_shape; + std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape) = obj.param; + std::ostringstream result; + result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_"; + result << "sd" << CommonTestUtils::vec2str(tmp_axes) << "_"; + result << "exp_shape" << tmp_exp_shape; + return result.str(); + } + +protected: + void SetUp() override { + std::tie(input_shapes, axes, exp_shape) = GetParam(); + output_shapes = unit_test::ShapeVector(0); + output_shapes.push_back(exp_shape); + arg = std::make_shared(element::f64, input_shapes.front().get_shape()); + } + std::vector axes; + std::shared_ptr arg; + bool specalZero; +}; + +TEST_P(AdaptiveAvgPoolV8CpuShapeInferenceTest , shape_inference_empty_const_map) { + const auto axes_node = std::make_shared(element::i32, ov::Shape{axes.size()}, axes); + const auto op = make_op(arg, axes_node); + + unit_test::cpu_test_shape_infer(op.get(), input_shapes, 
output_shapes); +} + +TEST_P(AdaptiveAvgPoolV8CpuShapeInferenceTest , shape_inference_with_const_map) { + const auto axes_node = std::make_shared(element::i32, PartialShape::dynamic()); + const auto op = make_op(arg, axes_node); + + const auto axes_const = std::make_shared(element::i32, ov::Shape{axes.size()}, axes); + const auto axes_tensor = std::make_shared(axes_const); + const std::map& constant_data = {{1, axes_tensor}}; + + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data); +} + +INSTANTIATE_TEST_SUITE_P( + CpuShapeInfer, + AdaptiveAvgPoolV8CpuShapeInferenceTest , + Values(make_tuple(unit_test::ShapeVector{{1, 3, 1, 2}, {2}}, std::vector{10, 20}, StaticShape({1, 3, 10, 20})), + make_tuple(unit_test::ShapeVector{{1, 2, 10}, {1}}, std::vector{17}, StaticShape({1, 2, 17}))), + AdaptiveAvgPoolV8CpuShapeInferenceTest::getTestCaseName); +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/adaptive_max_pool.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/adaptive_max_pool.cpp new file mode 100644 index 00000000000..917f8586291 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/adaptive_max_pool.cpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "common_test_utils/test_assertions.hpp" +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +using AdaptiveMaxPoolV8TestParams = std::tuple, // output_shapes + StaticShape // Expected shape + >; + +class AdaptiveMaxPoolV8CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest, + public WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + unit_test::ShapeVector tmp_input_shapes; + std::vector tmp_axes; + StaticShape tmp_exp_shape; + std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape) = obj.param; + std::ostringstream result; + result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_"; + result << "sd" << CommonTestUtils::vec2str(tmp_axes) << "_"; + result << "exp_shape" << tmp_exp_shape; + return result.str(); + } + +protected: + void SetUp() override { + std::tie(input_shapes, axes, exp_shape) = GetParam(); + output_shapes = unit_test::ShapeVector(0); + output_shapes.push_back(exp_shape); + output_shapes.push_back(exp_shape); + arg = std::make_shared(element::f64, input_shapes.front().get_shape()); + } + + std::vector axes; + std::shared_ptr arg; + bool specalZero; +}; + +TEST_P(AdaptiveMaxPoolV8CpuShapeInferenceTest , shape_inference_empty_const_map) { + const auto axes_node = std::make_shared(element::i32, ov::Shape{axes.size()}, axes); + const auto op = make_op(arg, axes_node); + + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes); +} + +TEST_P(AdaptiveMaxPoolV8CpuShapeInferenceTest , shape_inference_with_const_map) { + const auto axes_node = std::make_shared(element::i32, PartialShape::dynamic()); + const auto op = make_op(arg, axes_node); + + const auto axes_const = std::make_shared(element::i32, ov::Shape{axes.size()}, axes); + const auto axes_tensor = std::make_shared(axes_const); + const std::map& constant_data = {{1, axes_tensor}}; + + 
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    AdaptiveMaxPoolV8CpuShapeInferenceTest,
+    Values(make_tuple(unit_test::ShapeVector{{1, 3, 1, 2}, {2}}, std::vector<int32_t>{10, 20}, StaticShape({1, 3, 10, 20})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 10}, {1}}, std::vector<int32_t>{17}, StaticShape({1, 2, 17}))),
+    AdaptiveMaxPoolV8CpuShapeInferenceTest::getTestCaseName);
+} // namespace cpu_shape_infer
+} // namespace unit_test
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/binary_elementwise_arithmetic.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/binary_elementwise_arithmetic.cpp
new file mode 100644
index 00000000000..1b515222b85
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/binary_elementwise_arithmetic.cpp
@@ -0,0 +1,124 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "openvino/op/ops.hpp"
+#include "common_test_utils/test_assertions.hpp"
+#include "custom_shape_infer.hpp"
+namespace ov {
+namespace intel_cpu {
+namespace unit_test {
+namespace cpu_shape_infer {
+
+using namespace ov;
+using namespace ov::intel_cpu;
+
+template <class TOp>
+class CpuShapeInferenceTest_BEA : public testing::Test {};
+
+// Typed CpuShapeInferenceTest suite for binary elementwise arithmetic (BEA) operations
+TYPED_TEST_SUITE_P(CpuShapeInferenceTest_BEA);
+
+TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_equal_rank) {
+    auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+
+    auto node = std::make_shared<TypeParam>(A, B);
+
+    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1, 1, 5}, StaticShape{3, 1, 6, 1}},
+                             static_output_shapes = {StaticShape{3, 1, 6, 5}};
+
+    unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_a_rank_higher) {
+    auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1});
+
+    auto node = std::make_shared<TypeParam>(A, B);
+
+    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 1, 5}, StaticShape{4, 6, 1}},
+                             static_output_shapes = {StaticShape{3, 4, 6, 5}};
+
+    unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_b_rank_higher) {
+    auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1});
+    auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+
+    auto node = std::make_shared<TypeParam>(A, B);
+
+    std::vector<StaticShape> static_input_shapes = {StaticShape{4, 6, 1}, StaticShape{3, 4, 1, 5}},
+                             static_output_shapes = {StaticShape{3, 4, 6, 5}};
+
+    unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_numpy_incompatible_shapes) {
+    auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+
+    auto node = std::make_shared<TypeParam>(A, B);
+
+    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{2, 4, 6, 5}},
+                             static_output_shapes = {StaticShape{}};
+
+    OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes),
+                    ov::Exception,
+                    testing::HasSubstr("Eltwise shape infer input shapes dim index:"));
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_none) {
+    auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+
+    auto node = std::make_shared<TypeParam>(A, B, op::AutoBroadcastType::NONE);
+
+    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 4, 6, 5}},
+                             static_output_shapes = {StaticShape{3, 4, 6, 5}};
+
+    unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes);
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_autob_none_incompatible_shapes) {
+    GTEST_SKIP() << "Skipping test, please check CVS-108946";
+    auto A = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto B = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+
+    auto node = std::make_shared<TypeParam>(A, B, op::AutoBroadcastType::NONE);
+
+    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}},
+                             static_output_shapes = {StaticShape{}};
+
+    // TODO: CVS-108946, the case below cannot pass yet.
+    OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes),
+                    ov::Exception,
+                    testing::HasSubstr("Eltwise shape infer input shapes dim index:"));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceTest_BEA,
+                            shape_inference_autob_numpy_equal_rank,
+                            shape_inference_autob_numpy_a_rank_higher,
+                            shape_inference_autob_numpy_b_rank_higher,
+                            shape_inference_autob_numpy_incompatible_shapes,
+                            shape_inference_autob_none,
+                            shape_inference_autob_none_incompatible_shapes);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_add, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Add>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_divide, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Divide>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_floor_mod, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::FloorMod>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_maximum, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Maximum>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_minimum, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Minimum>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_mod, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Mod>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_multiply, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Multiply>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_power, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Power>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_squared_difference, CpuShapeInferenceTest_BEA, ::testing::Types<op::v0::SquaredDifference>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_subtract, CpuShapeInferenceTest_BEA, ::testing::Types<op::v1::Subtract>);
+
+} // namespace cpu_shape_infer
+} // namespace unit_test
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/color_covert_shape_inference.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/color_covert_shape_inference.cpp
new file mode 100644
index 00000000000..43be1737e6b
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/color_covert_shape_inference.cpp
@@ -0,0 +1,110 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "common_test_utils/test_assertions.hpp"
+#include "openvino/op/ops.hpp"
+#include "custom_shape_infer.hpp"
+namespace ov {
+namespace intel_cpu {
+namespace unit_test {
+namespace cpu_shape_infer {
+
+using namespace ov;
+using namespace ov::intel_cpu;
+
+template <class TOp>
+class CpuShapeInferenceTest_ColorConvertNV12 : public testing::Test {};
+
+// Typed CpuShapeInferenceTest suite for ColorConvert NV12 operations
+TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertNV12);
+
+TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertNV12, singlePlane) {
+    auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto op = std::make_shared<TypeParam>(data);
+    std::vector<StaticShape> static_input_shapes = {StaticShape{1, 720, 640, 1}};
+    std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
+    unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertNV12, multiPlane) {
+    auto dataY = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto dataUV = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto op = std::make_shared<TypeParam>(dataY, dataUV);
+    std::vector<StaticShape> static_input_shapes = {StaticShape{1, 480, 640, 1}, StaticShape{1, 240, 320, 2}};
+    std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
+    unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertNV12, novalid_input) {
+    auto dataY = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto dataUV = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto op = std::make_shared<TypeParam>(dataY, dataUV);
+    std::vector<StaticShape> static_input_shapes = {StaticShape{480, 640, 1}, StaticShape{240, 320, 2}};
+    std::vector<StaticShape> static_output_shapes = {StaticShape{}};
+
+    OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes),
+                    ov::Exception,
+                    testing::HasSubstr("NV12Converter node has incorrect input dimensions"));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertNV12,
+                            singlePlane,
+                            multiPlane,
+                            novalid_input);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_NV12toBGR, CpuShapeInferenceTest_ColorConvertNV12, ::testing::Types<op::v8::NV12toBGR>);
+INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_NV12toRGB, CpuShapeInferenceTest_ColorConvertNV12, ::testing::Types<op::v8::NV12toRGB>);
+
+template <class TOp>
+class CpuShapeInferenceTest_ColorConvertI420 : public testing::Test {};
+
+// Typed CpuShapeInferenceTest suite for ColorConvert I420 operations
+TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertI420);
+
+TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertI420, singlePlane) {
+    auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto op = std::make_shared<TypeParam>(data);
+    std::vector<StaticShape> static_input_shapes = {StaticShape{1, 720, 640, 1}};
+    std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
+    unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
+}
+
+TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertI420, multiPlane) {
+    auto dataY = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto dataU = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto dataV = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
+    auto op = std::make_shared<TypeParam>(dataY, dataU, dataV);
+    std::vector<StaticShape> static_input_shapes = {StaticShape{1, 480, 640, 1}, StaticShape{1, 240, 320, 1}, StaticShape{1, 240, 320, 1}};
+    std::vector<StaticShape> static_output_shapes = {StaticShape{1, 480, 640, 3}};
+    unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
+}
+
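+// Worked example for the singlePlane cases above (illustrative numbers, not an
+// extra test): a single-plane NV12/I420 frame stores the chroma rows below the
+// luma rows, so the inferred output height is 2/3 of the input height while the
+// width is passed through:
+//   in {1, 720, 640, 1}  ->  H_out = 720 * 2 / 3 = 480  ->  out {1, 480, 640, 3}
+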
+TYPED_TEST_P(CpuShapeInferenceTest_ColorConvertI420, novalid_input) { + auto dataY = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + auto dataU = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + auto dataV = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + auto op = std::make_shared(dataY, dataU, dataV); + std::vector static_input_shapes = {StaticShape{480, 640, 1}, StaticShape{240, 320, 1}, StaticShape{240, 320, 1}}; + std::vector static_output_shapes = {StaticShape{}}; + + OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes), + ov::Exception, + testing::HasSubstr("NV12Converter node has incorrect input dimensions")); +} + +REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceTest_ColorConvertI420, + singlePlane, + multiPlane, + novalid_input); + +INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_I420toBGR, CpuShapeInferenceTest_ColorConvertI420, ::testing::Types); +INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer_I420toRGB, CpuShapeInferenceTest_ColorConvertI420, ::testing::Types); + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.cpp new file mode 100644 index 00000000000..3cf0753ae34 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.cpp @@ -0,0 +1,146 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include "openvino/core/partial_shape.hpp" +#include "openvino/core/type.hpp" +#include "openvino/op/ops.hpp" +#include "openvino/op/parameter.hpp" +#include "shape_inference/custom/reshape.hpp" +#include "shape_inference/custom/gather.hpp" +#include "shape_inference/custom/transpose.hpp" +#include "shape_inference/custom/color_convert.hpp" +#include "shape_inference/custom/eltwise.hpp" +#include "shape_inference/custom/adaptive_pooling.hpp" +#include "shape_inference/custom/fullyconnected.hpp" +#include "shape_inference/custom/matmul.hpp" +#include "shape_inference/custom/ngram.hpp" +#include "shape_inference/custom/one_hot.hpp" +#include "shape_inference/custom/priorbox.hpp" +#include "shape_inference/custom/priorbox_clustered.hpp" +#include "shape_inference/custom/shapeof.hpp" +#include "shape_inference/custom/strided_slice.hpp" +#include "ie_ngraph_utils.hpp" +#include "custom_shape_infer.hpp" +#include "shape_inference/shape_inference_status.hpp" +#include +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace { +#define INTEL_CPU_CUSTOM_SHAPE_INFER(__prim, __type) \ + registerNodeIfRequired(intel_cpu, __prim, __type, __prim) + +class EltwiseShapeInferTestFactory : public node::EltwiseShapeInferFactory { +public: + EltwiseShapeInferTestFactory(std::shared_ptr op) : EltwiseShapeInferFactory() {} +}; + +class ShapeOfShapeInferTestFactory : public node::ShapeOfShapeInferFactory { +public: + ShapeOfShapeInferTestFactory(std::shared_ptr op) : ShapeOfShapeInferFactory() {} +}; + +class CustomShapeInferFF : public openvino::cc::Factory& op)> { +public: + CustomShapeInferFF():Factory("CpuCustomShapeInferTestFactory") { + INTEL_CPU_CUSTOM_SHAPE_INFER(node::AdaptivePoolingShapeInferFactory, Type::AdaptivePooling); + INTEL_CPU_CUSTOM_SHAPE_INFER(EltwiseShapeInferTestFactory, Type::Eltwise); + 
INTEL_CPU_CUSTOM_SHAPE_INFER(node::FCShapeInferFactory, Type::FullyConnected); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::TransposeShapeInferFactory, Type::Transpose); + INTEL_CPU_CUSTOM_SHAPE_INFER(ShapeOfShapeInferTestFactory, Type::ShapeOf); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::ColorConvertShapeInferFactory, Type::ColorConvert); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::ReshapeShapeInferFactory, Type::Reshape); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::MMShapeInferFactory, Type::MatMul); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::OneHotShapeInferFactory, Type::OneHot); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::StridedSliceShapeInferFactory, Type::StridedSlice); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::PriorBoxShapeInferFactory, Type::PriorBox); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::PriorBoxClusteredShapeInferFactory, Type::PriorBoxClustered); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::NgramShapeInferFactory, Type::Ngram); + INTEL_CPU_CUSTOM_SHAPE_INFER(node::GatherShapeInferFactory, Type::Gather); +#undef INTEL_CPU_CUSTOM_SHAPE_INFER + } + + ShapeInferFactory* create(const std::shared_ptr& op) { + ShapeInferFactory* newShapeInferFactory = nullptr; + std::unique_ptr ol(createNodeIfRegistered(intel_cpu, TypeFromName(op->get_type_name()), op)); + if (ol != nullptr) { + newShapeInferFactory = ol.release(); + } + return newShapeInferFactory; + } +}; + +void compare_result(const std::vector& ref, const std::vector& cus) { + ASSERT_EQ(ref.size(), cus.size()); + for (size_t i = 0; i < ref.size(); i++) { + ASSERT_EQ(ref[i].size(), cus[i].size()); + for (size_t y = 0; y < ref[i].size(); y++) { + ASSERT_EQ(ref[i][y].get_length(), cus[i][y]); + } + } +} + +} //namespace + +void cpu_test_shape_infer(ov::Node* op, + const std::vector& input_shapes, + std::vector& output_shapes, + const std::map& constant_data) { + static std::shared_ptr cusFactory = std::make_shared(); + auto shapeInferFactory = cusFactory->create(op->shared_from_this()); + ASSERT_TRUE(shapeInferFactory != nullptr); + auto cusShapeInfer = shapeInferFactory->makeShapeInfer(); + std::vector> cusInputShapes; + std::vector tmpInputShapes; + cusInputShapes.reserve(input_shapes.size()); + tmpInputShapes.reserve(input_shapes.size()); + for (size_t port = 0; port < input_shapes.size(); ++port) { + VectorDims dims; + for (size_t i = 0; i < input_shapes[port].size(); ++i) { + dims.emplace_back(input_shapes[port][i].get_length()); + } + tmpInputShapes.emplace_back(dims); + cusInputShapes.emplace_back(std::ref(tmpInputShapes[port])); + } + + std::unordered_map cusInputValues; + auto input_value_port_mask = cusShapeInfer->get_port_mask(); + dnnl::engine eng; + if (input_value_port_mask) { + for (size_t port = 0; port < input_shapes.size(); ++port) { + if (input_value_port_mask & (1 << port)) { + const auto tensorIter = constant_data.find(port); + const void* data = nullptr; + ov::element::Type elementType; + if (tensorIter != constant_data.end()) { + const auto tensor = tensorIter->second; + data = tensor->get_data_ptr(); + elementType = tensor->get_element_type(); + } else { + const auto input_op = op->input_value(port).get_node_shared_ptr(); + const auto const_op = ov::as_type_ptr(input_op); + ASSERT_TRUE(const_op != nullptr); + data = const_op->get_data_ptr(); + elementType = const_op->get_element_type(); + } + CpuBlockedMemoryDesc desc( + InferenceEngine::details::convertPrecision(elementType), + ov::intel_cpu::Shape(tmpInputShapes[port])); + MemoryPtr memoryPtr = std::make_shared(eng, desc, data, true); + cusInputValues[port] = memoryPtr; + } + } + } + auto result = 
cusShapeInfer->infer(cusInputShapes, cusInputValues); + compare_result(output_shapes, result.dims); + ASSERT_TRUE(result.status == ov::intel_cpu::ShapeInferStatus::success); +} + +std::string boolToString(const bool value) { + return value ? "true" : "false"; +} + +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.hpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.hpp new file mode 100644 index 00000000000..a3e2d149c33 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "cpu_types.h" +#include +#include +#include +#include + +#pragma once + +namespace ov { +namespace intel_cpu { +namespace unit_test { +void cpu_test_shape_infer(ov::Node* op, + const std::vector& input_shapes, + std::vector& output_shapes, + const std::map& constant_data = {}); + +using ShapeVector = std::vector; + +template +class OpCpuShapeInferenceTest : public testing::Test { +protected: + using op_type = TOp; + + ShapeVector input_shapes, output_shapes; + ov::intel_cpu::StaticShape exp_shape; + std::shared_ptr op; + + template + std::shared_ptr make_op(Args&&... args) { + return std::make_shared(std::forward(args)...); + } +}; + +std::string boolToString(const bool value); +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/elementwises.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/elementwises.cpp new file mode 100644 index 00000000000..6b69a6f7c63 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/elementwises.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; + +TEST(CpuShapeInfer, UnaryEltwiseTest) { + auto data = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + auto node = std::make_shared(data); + + std::vector static_input_shapes = {StaticShape{3, 6, 5, 5}}, + static_output_shapes = {StaticShape{3, 6, 5, 5}}; + + unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes); +} +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/fullconnect.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/fullconnect.cpp new file mode 100644 index 00000000000..b8aedecdc94 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/fullconnect.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "transformations/cpu_opset/common/op/fully_connected.hpp" +#include "custom_shape_infer.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; + +TEST(CpuShapeInfer, FC_InputSize_2) { + auto activate = 
std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1});
+    auto weight = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{5, 6});
+    auto op = std::make_shared<FullyConnectedNode>(activate, weight, ov::Rank(5), element::f32);
+    std::vector<StaticShape> static_input_shapes = {StaticShape{720, 640}, {5, 6}};
+    std::vector<StaticShape> static_output_shapes = {StaticShape{1, 1, 1, 720, 5}};
+    unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
+}
+
+} // namespace cpu_shape_infer
+} // namespace unit_test
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/gather.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/gather.cpp
new file mode 100644
index 00000000000..e462cada491
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/gather.cpp
@@ -0,0 +1,86 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "custom_shape_infer.hpp"
+#include "openvino/op/ops.hpp"
+namespace ov {
+namespace intel_cpu {
+namespace unit_test {
+namespace cpu_shape_infer {
+
+using namespace ov;
+using namespace ov::intel_cpu;
+using namespace testing;
+
+using TestParams = std::tuple<int32_t,                 // axis
+                              unit_test::ShapeVector,  // input shapes
+                              StaticShape              // expected output shape
+                              >;
+
+template <class TOp>
+class CpuShapeInferenceGatherTest : public unit_test::OpCpuShapeInferenceTest<TOp> {
+protected:
+    void SetUp() override {
+        this->output_shapes.resize(0);
+    }
+
+    std::shared_ptr<TOp> make_gather(const unit_test::ShapeVector& shapes, const int32_t* const axis_val_ptr = nullptr) {
+        const auto p_dims = std::vector<Dimension::value_type>(shapes[0].size(), -1);
+        const auto i_dims = std::vector<Dimension::value_type>(shapes[1].size(), -1);
+        auto param = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{p_dims});
+        auto indices = std::make_shared<op::v0::Parameter>(element::i32, PartialShape{i_dims});
+
+        if (axis_val_ptr) {
+            auto axis = op::v0::Constant::create(element::i32, ov::Shape{}, {*axis_val_ptr});
+            return this->make_op(param, indices, axis);
+        } else {
+            auto axis = std::make_shared<op::v0::Parameter>(element::i32, PartialShape{});
+            return this->make_op(param, indices, axis);
+        }
+    }
+
+    int32_t axis_val;
+};
+
+// Parameters shared by the typed tests below; each test loops over all of these cases internally.
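+// Each entry reads as (axis, {data_shape, indices_shape, axis_shape}, expected_output_shape);
+// e.g. the first case gathers along axis 0: data {3, 2} with indices {2, 2} -> output {2, 2, 2}.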
+const auto GatherTestParams = + std::vector{make_tuple(0, unit_test::ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({2, 2, 2})), + make_tuple(1, unit_test::ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})), + make_tuple(-1, unit_test::ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})), + make_tuple(0, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({2, 1, 2, 2, 4})), + make_tuple(1, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({3, 2, 1, 2, 4})), + make_tuple(-1, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {}}, StaticShape({3, 2, 2, 1, 2})), + make_tuple(-2, unit_test::ShapeVector{{3, 2, 4}, {2, 1, 2}, {}}, StaticShape({3, 2, 1, 2, 4}))}; + +TYPED_TEST_SUITE_P(CpuShapeInferenceGatherTest); + +TYPED_TEST_P(CpuShapeInferenceGatherTest, axis_const) { + for (auto&& params : GatherTestParams) { + std::tie(this->axis_val, this->input_shapes, this->exp_shape) = params; + + auto op = this->make_gather(this->input_shapes, &this->axis_val); + this->output_shapes = {this->exp_shape}; + unit_test::cpu_test_shape_infer(op.get(), this->input_shapes, this->output_shapes); + } +} + +TYPED_TEST_P(CpuShapeInferenceGatherTest, axis_in_const_map) { + for (auto&& params : GatherTestParams) { + std::tie(this->axis_val, this->input_shapes, this->exp_shape) = params; + + auto op = this->make_gather(this->input_shapes); + auto axis_tensor = std::make_shared(element::i32, ov::Shape{1}, &this->axis_val); + + this->output_shapes = {this->exp_shape}; + unit_test::cpu_test_shape_infer(op.get(), this->input_shapes, this->output_shapes, {{2, axis_tensor}}); + } +} + +REGISTER_TYPED_TEST_SUITE_P(CpuShapeInferenceGatherTest, axis_const, axis_in_const_map); +using GatherTypes = Types; +INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer, CpuShapeInferenceGatherTest, GatherTypes); + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/matmul.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/matmul.cpp new file mode 100644 index 00000000000..b503b5e965a --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/matmul.cpp @@ -0,0 +1,148 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +using matmul_test_params_t = std::tuple; + +class CPUMatMulTest : public TestWithParam { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + StaticShape tmp_input_shape_A; + StaticShape tmp_input_shape_B; + std::tie(tmp_input_shape_A, tmp_input_shape_B) = obj.param; + std::ostringstream result; + result << "IA" << tmp_input_shape_A << "_"; + result << "IB" << tmp_input_shape_B; + return result.str(); + } + +protected: + void SetUp() override { + std::tie(a_shape, b_shape) = GetParam(); + + set_exp_shape(); + output_shapes.clear(); + output_shapes.push_back(exp_shape); + } + + std::shared_ptr make_matmul(const size_t& a_dim_count, + const size_t& b_dim_count, + const bool transpose_a, + const bool transpose_b) { + auto a_input = std::make_shared(element::i64, PartialShape::dynamic(a_dim_count)); + auto b_input = std::make_shared(element::i64, PartialShape::dynamic(b_dim_count)); + + 
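+        // Both operands are declared with fully dynamic dimensions of the requested
+        // rank; the concrete extents come only from the StaticShapes that the
+        // individual tests hand to cpu_test_shape_infer.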
return std::make_shared<op::v0::MatMul>(a_input, b_input, transpose_a, transpose_b);
+    }
+
+    void set_exp_shape() {
+        if (a_shape.size() > 1 && b_shape.size() > 1) {
+            std::transform(a_shape.cbegin(),
+                           a_shape.cend() - 2,
+                           b_shape.cbegin(),
+                           std::back_inserter(exp_shape),
+                           [](const StaticDimension& a, const StaticDimension& b) {
+                               return std::max(a.get_length(), b.get_length());
+                           });
+            exp_shape.push_back(*std::next((*a_shape).rbegin()));
+            exp_shape.push_back((*b_shape).back());
+        } else if (a_shape.size() == 1 && b_shape.size() > 1) {
+            exp_shape = b_shape;
+            (*exp_shape).erase(std::prev((*exp_shape).end(), 2));
+        } else if (b_shape.size() == 1 && a_shape.size() > 1) {
+            exp_shape = a_shape;
+            (*exp_shape).erase(std::prev((*exp_shape).end()));
+        }
+    }
+
+    static StaticShape make_transpose_input(const StaticShape& in) {
+        StaticShape out(in);
+        if (out.size() > 1) {
+            std::iter_swap((*out).rbegin(), std::next((*out).rbegin()));
+        }
+        return out;
+    }
+
+    StaticShape a_shape, b_shape, exp_shape;
+    unit_test::ShapeVector output_shapes;
+};
+
+TEST_P(CPUMatMulTest, no_input_transpose) {
+    GTEST_SKIP() << "Skipping test, please check CVS-108946";
+    const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, false);
+
+    std::vector<StaticShape> static_input_shapes = {a_shape, b_shape};
+
+    // TODO: CVS-108946, this case cannot pass with the custom shape inference yet.
+    unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
+}
+
+TEST_P(CPUMatMulTest, transpose_input_a) {
+    GTEST_SKIP() << "Skipping test, please check CVS-108946";
+    const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, false);
+
+    const auto a_transpose = make_transpose_input(a_shape);
+    std::vector<StaticShape> static_input_shapes = {a_transpose, b_shape};
+
+    // TODO: CVS-108946, this case cannot pass with the custom shape inference yet.
+    unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
+}
+
+TEST_P(CPUMatMulTest, transpose_input_b) {
+    GTEST_SKIP() << "Skipping test, please check CVS-108946";
+    const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, true);
+
+    const auto b_transpose = make_transpose_input(b_shape);
+    std::vector<StaticShape> static_input_shapes = {a_shape, b_transpose};
+
+    // TODO: CVS-108946, this case cannot pass with the custom shape inference yet.
+    unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
+}
+
+TEST_P(CPUMatMulTest, transpose_inputs_a_b) {
+    GTEST_SKIP() << "Skipping test, please check CVS-108946";
+    const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, true);
+
+    const auto a_transpose = make_transpose_input(a_shape);
+    const auto b_transpose = make_transpose_input(b_shape);
+
+    std::vector<StaticShape> static_input_shapes = {a_transpose, b_transpose};
+
+    // TODO: CVS-108946, this case cannot pass with the custom shape inference yet.
+    unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes);
+}
+
+/** \brief Test cases cover 1-D, 2-D and N-D operand combinations, including broadcasting of batch dimensions. 
*/ +INSTANTIATE_TEST_SUITE_P(CpuShapeInfer, + CPUMatMulTest, + Values(make_tuple(StaticShape({1}), StaticShape({1})), + make_tuple(StaticShape({1}), StaticShape({1, 3})), + make_tuple(StaticShape({1}), StaticShape({1, 1, 3})), + make_tuple(StaticShape({3, 1}), StaticShape({1})), + make_tuple(StaticShape({3, 2, 1}), StaticShape({1})), + make_tuple(StaticShape({3}), StaticShape({3})), + make_tuple(StaticShape({5, 2}), StaticShape({2, 6})), + make_tuple(StaticShape({2, 1, 2}), StaticShape({2, 6})), + make_tuple(StaticShape({10, 8, 9, 2}), StaticShape({10, 8, 2, 8})), + make_tuple(StaticShape({3, 1, 4, 3, 4}), StaticShape({3, 2, 1, 4, 1}))), + CPUMatMulTest::getTestCaseName); + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/ngram.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/ngram.cpp new file mode 100644 index 00000000000..6a6689ae544 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/ngram.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "transformations/cpu_opset/common/op/ngram.hpp" +#include "custom_shape_infer.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; + +TEST(CpuShapeInfer, Ngram) { + auto embeddings = std::make_shared(element::f32, PartialShape{-1, -1}); + auto idces = std::make_shared(element::i32, PartialShape{-1, -1}); + auto op = std::make_shared(embeddings, idces, 3); + std::vector static_input_shapes = {StaticShape{720, 640}, {5, 6}}; + std::vector static_output_shapes = {StaticShape{720, 640 * 3}}; + unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes); +} +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/one_hot.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/one_hot.cpp new file mode 100644 index 00000000000..629fbb9bfed --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/one_hot.cpp @@ -0,0 +1,129 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +using OneHotTestParams = std::tuple; + +class OneHotCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest, + public WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + unit_test::ShapeVector tmp_input_shapes; + int64_t tmp_depth; + int32_t tmp_on; + int32_t tmp_off; + StaticShape tmp_exp_shape; + std::tie(tmp_input_shapes, tmp_depth, tmp_on, tmp_off, tmp_exp_shape) = obj.param; + std::ostringstream result; + result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_"; + result << "depth" << tmp_depth << "_"; + result << "on" << tmp_on << "_"; + result << "off" << tmp_off << "_"; + result << "exp_shape" << tmp_exp_shape; + return result.str(); + } + +protected: + void SetUp() 
override { + std::tie(input_shapes, m_depth, m_on, m_off, exp_shape) = GetParam(); + output_shapes = unit_test::ShapeVector(0); + output_shapes.push_back(exp_shape); + arg = std::make_shared(element::i64, input_shapes.front().get_shape()); + } + + int64_t m_depth; + int32_t m_on; + int32_t m_off; + std::shared_ptr arg; +}; + +TEST_P(OneHotCpuShapeInferenceTest , shape_inference_empty_const_map) { + const auto depth = op::v0::Constant::create(element::i64, ov::Shape{}, {m_depth}); + const auto on_value = op::v0::Constant::create(element::i32, ov::Shape{}, {m_on}); + const auto off_value = op::v0::Constant::create(element::i32, ov::Shape{}, {m_off}); + int64_t axis = -1; + const auto op = make_op(arg, depth, on_value, off_value, axis); + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes); +} + +TEST_P(OneHotCpuShapeInferenceTest , shape_inference_with_const_map) { + const auto depth = std::make_shared(element::i64, ov::Shape{}); + const auto on = std::make_shared(element::i32, ov::Shape{}); + const auto off = std::make_shared(element::i32, ov::Shape{}); + int64_t axis = -1; + const auto op = make_op(arg, depth, on, off, axis); + + const auto depth_const = std::make_shared(element::i64, ov::Shape{}, std::vector{m_depth}); + const auto on_const = std::make_shared(element::i32, ov::Shape{}, std::vector{m_on}); + const auto off_const = std::make_shared(element::i32, ov::Shape{}, std::vector{m_off}); + const auto depth_tensor = std::make_shared(depth_const); + const auto on_tensor = std::make_shared(on_const); + const auto off_tensor = std::make_shared(off_const); + const std::map& constant_data = {{1, depth_tensor}, + {2, on_tensor}, + {3, off_tensor}}; + + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data); +} + +INSTANTIATE_TEST_SUITE_P( + CpuShapeInfer, + OneHotCpuShapeInferenceTest , + Values(make_tuple(unit_test::ShapeVector{{3}, {}, {}, {}}, 2, 5, 10, StaticShape({3, 2})), + make_tuple(unit_test::ShapeVector{{3}, {}, {}, {}}, 2, 1, 0, StaticShape({3, 2}))), + OneHotCpuShapeInferenceTest::getTestCaseName); + +using OneHotCpuShapeInferenceThrowExceptionTest = OneHotCpuShapeInferenceTest; +TEST_P(OneHotCpuShapeInferenceThrowExceptionTest, wrong_pattern) { + GTEST_SKIP() << "Skipping test, please check CVS-108946"; + const auto depth = std::make_shared(element::i64, ov::Shape{}); + const auto on = std::make_shared(element::i32, ov::Shape{}); + const auto off = std::make_shared(element::i32, ov::Shape{}); + int64_t axis = -1; + const auto op = make_op(arg, depth, on, off, axis); + + const auto depth_const = std::make_shared(element::i64, ov::Shape{}, std::vector{m_depth}); + const auto on_const = std::make_shared(element::i32, ov::Shape{}, std::vector{m_on}); + const auto off_const = std::make_shared(element::i32, ov::Shape{}, std::vector{m_off}); + const auto depth_tensor = std::make_shared(depth_const); + const auto on_tensor = std::make_shared(on_const); + const auto off_tensor = std::make_shared(off_const); + const std::map& constant_data = {{1, depth_tensor}, + {2, on_tensor}, + {3, off_tensor}}; + + // TODO , implementation should throw exception + ASSERT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data), + ov::Exception); +} + +INSTANTIATE_TEST_SUITE_P( + CpuShapeInfer, + OneHotCpuShapeInferenceThrowExceptionTest, + Values(make_tuple(unit_test::ShapeVector{{3}, {}, {}, {}}, -2, 1, 0, StaticShape({}))), + OneHotCpuShapeInferenceThrowExceptionTest::getTestCaseName); + +} // namespace 
cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/prior_box.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/prior_box.cpp new file mode 100644 index 00000000000..f9dd7f707fe --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/prior_box.cpp @@ -0,0 +1,209 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "custom_shape_infer.hpp" +#include +#include +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +// TODO should support v8::PriorBox + +using PriorBoxV0TestParams = std::tuple>, // layer_data, image_data + StaticShape // Expected shape + >; + +class PriorBoxV0CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest, + public WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + unit_test::ShapeVector tmp_input_shapes; + op::v0::PriorBox::Attributes tmp_attrs; + std::vector> tmp_data; + StaticShape tmp_exp_shape; + std::tie(tmp_input_shapes, tmp_attrs, tmp_data, tmp_exp_shape) = obj.param; + std::ostringstream result; + result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_"; + result << "min_size" << CommonTestUtils::vec2str(tmp_attrs.min_size) << "_"; + result << "max_size" << CommonTestUtils::vec2str(tmp_attrs.max_size) << "_"; + result << "density" << CommonTestUtils::vec2str(tmp_attrs.density) << "_"; + result << "fixed_ratio" << CommonTestUtils::vec2str(tmp_attrs.fixed_ratio) << "_"; + result << "fixed_size" << CommonTestUtils::vec2str(tmp_attrs.fixed_size) << "_"; + result << "clip(" << unit_test::boolToString(tmp_attrs.clip) << ")_"; + result << "flip(" << unit_test::boolToString(tmp_attrs.flip) << ")_"; + result << "step(" << tmp_attrs.step << ")_"; + result << "offset(" << tmp_attrs.offset << ")_"; + result << "variance" << CommonTestUtils::vec2str(tmp_attrs.variance) << "_"; + result << "scale_all_sizes(" << unit_test::boolToString(tmp_attrs.scale_all_sizes) << ")_"; + result << "exp_shape(" << tmp_exp_shape << ")"; + return result.str(); + } + +protected: + void SetUp() override { + std::tie(input_shapes, attrs, data, exp_shape) = GetParam(); + output_shapes = unit_test::ShapeVector(0); + output_shapes.push_back(exp_shape); + ASSERT_EQ(input_shapes.size(), 2); + ASSERT_EQ(data.size(), 2); + } + + op::v0::PriorBox::Attributes attrs; + std::vector> data; +}; + +namespace prior_box { +const op::v0::PriorBox::Attributes createAttrs( + std::vector min_size, + std::vector max_size, + std::vector aspect_ratio, + std::vector density, + std::vector fixed_ratio, + std::vector fixed_size, + bool clip, + bool flip, + float step, + float offset, + std::vector variance, + bool scale_all_sizes); +const op::v0::PriorBox::Attributes createAttrs( + std::vector min_size, + std::vector max_size, + std::vector aspect_ratio, + std::vector density, + std::vector fixed_ratio, + std::vector fixed_size, + bool clip, + bool flip, + float step, + float offset, + std::vector variance, + bool scale_all_sizes) { + op::v0::PriorBox::Attributes attrs; + attrs.min_size = min_size; + attrs.max_size = max_size; + attrs.aspect_ratio = aspect_ratio; + attrs.density = density; + attrs.fixed_ratio = fixed_ratio; + 
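+    // The remaining fields below mirror the function arguments one-to-one. Note that
+    // the second output dimension checked by this suite equals 4 * H * W * num_priors;
+    // e.g. attrs1 on a 24x42 layer with 4 priors per location gives 4 * 24 * 42 * 4 = 16128.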
attrs.fixed_size = fixed_size; + attrs.clip = clip; + attrs.flip = flip; + attrs.step = step; + attrs.offset = offset; + attrs.variance = variance; + attrs.scale_all_sizes = scale_all_sizes; + return attrs; +} +const op::v0::PriorBox::Attributes attrs1 = createAttrs( + {16.0f}, // min_size Desired min_size of prior boxes + {38.46f}, // max_size Desired max_size of prior boxes + {2.0f}, // aspect_ratio Aspect ratios of prior boxes + {}, // density + {}, // fixed_ratio + {}, // fixed_size + false, // clip Clip output to [0, 1] + true, // flip Flip aspect ratios + 16.0f, // step Distance between prior box centers + 0.5f, // offset Box offset relative to top center of image + {0.1f, 0.1f, 0.2f, 0.2f}, // variance Values to adjust prior boxes with + true // scale_all_sizes Scale all sizes +); + +const op::v0::PriorBox::Attributes attrs2 = createAttrs( + {2.0f, 3.0f}, // min_size Desired min_size of prior boxes + {}, // max_size Desired max_size of prior boxes + {1.5f, 2.0f, 2.5f}, // aspect_ratio Aspect ratios of prior boxes + {}, // density + {}, // fixed_ratio + {}, // fixed_size + false, // clip Clip output to [0, 1] + false, // flip Flip aspect ratios + 0.0f, // step Distance between prior box centers + 0.0f, // offset Box offset relative to top center of image + {}, // variance Values to adjust prior boxes with + false // scale_all_sizes Scale all sizes +); + +const op::v0::PriorBox::Attributes attrs3 = createAttrs( + {2.0f, 3.0f}, // min_size Desired min_size of prior boxes + {}, // max_size Desired max_size of prior boxes + {1.5f, 2.0f, 2.5f}, // aspect_ratio Aspect ratios of prior boxes + {}, // density + {}, // fixed_ratio + {}, // fixed_size + false, // clip Clip output to [0, 1] + true, // flip Flip aspect ratios + 0.0f, // step Distance between prior box centers + 0.0f, // offset Box offset relative to top center of image + {}, // variance Values to adjust prior boxes with + false // scale_all_sizes Scale all sizes +); + +const op::v0::PriorBox::Attributes attrs4 = createAttrs( + {256.0f}, // min_size Desired min_size of prior boxes + {315.0f}, // max_size Desired max_size of prior boxes + {2.0f}, // aspect_ratio Aspect ratios of prior boxes + {}, // density + {}, // fixed_ratio + {}, // fixed_size + false, // clip Clip output to [0, 1] + true, // flip Flip aspect ratios + 0.0f, // step Distance between prior box centers + 0.0f, // offset Box offset relative to top center of image + {}, // variance Values to adjust prior boxes with + true // scale_all_sizes Scale all sizes +); + +} // namespace prior_box + +TEST_P(PriorBoxV0CpuShapeInferenceTest , shape_inference_empty_const_map) { + const auto layer_const = std::make_shared(element::i32, ov::Shape{2}, data[0]); + const auto image_const = std::make_shared(element::i32, ov::Shape{2}, data[1]); + auto op = make_op(layer_const, image_const, attrs); + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes); +} + +TEST_P(PriorBoxV0CpuShapeInferenceTest , shape_inference_with_const_map) { + const auto layer_shape = std::make_shared(element::i32, PartialShape::dynamic()); + const auto image_shape = std::make_shared(element::i32, PartialShape::dynamic()); + auto op = make_op(layer_shape, image_shape, attrs); + + const auto layer_const = std::make_shared(element::i32, ov::Shape{2}, data[0]); + const auto image_const = std::make_shared(element::i32, ov::Shape{2}, data[1]); + const std::map const_data { + {0, std::make_shared(layer_const)}, + {1, std::make_shared(image_const)}, + }; + + unit_test::cpu_test_shape_infer(op.get(), 
input_shapes, output_shapes, const_data); +} + +INSTANTIATE_TEST_SUITE_P( + CpuShapeInfer, + PriorBoxV0CpuShapeInferenceTest , + Values(make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs1, + std::vector>{{24, 42}, {384, 672}}, StaticShape({2, 16128})), + make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs2, + std::vector>{{32, 32}, {384, 672}}, StaticShape({2, 20480})), + make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs3, + std::vector>{{32, 32}, {300, 300}}, StaticShape({2, 32768})), + make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box::attrs4, + std::vector>{{1, 1}, {300, 300}}, StaticShape({2, 16}))), + PriorBoxV0CpuShapeInferenceTest::getTestCaseName); + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/prior_box_clustered.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/prior_box_clustered.cpp new file mode 100644 index 00000000000..d1a4357f70c --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/prior_box_clustered.cpp @@ -0,0 +1,174 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "custom_shape_infer.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/prior_box_clustered.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +using PriorBoxClusteredV0TestParams = std::tuple>, // layer_data, image_data + StaticShape // Expected shape + >; + +class PriorBoxClusteredV0CpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest, + public WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + unit_test::ShapeVector tmp_input_shapes; + op::v0::PriorBoxClustered::Attributes tmp_attrs; + std::vector> tmp_data; + StaticShape tmp_exp_shape; + std::tie(tmp_input_shapes, tmp_attrs, tmp_data, tmp_exp_shape) = obj.param; + std::ostringstream result; + + result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_"; + result << "widths" << CommonTestUtils::vec2str(tmp_attrs.widths) << "_"; + result << "heights" << CommonTestUtils::vec2str(tmp_attrs.heights) << "_"; + result << "clip(" << unit_test::boolToString(tmp_attrs.clip) << ")_"; + result << "step_widths(" << tmp_attrs.step_widths<< ")_"; + result << "step_heights(" << tmp_attrs.step_heights << ")_"; + result << "offset(" << tmp_attrs.offset << ")_"; + result << "variances" << CommonTestUtils::vec2str(tmp_attrs.variances) << "_"; + result << "exp_shape(" << tmp_exp_shape << ")"; + return result.str(); + } + +protected: + void SetUp() override { + std::tie(input_shapes, attrs, data, exp_shape) = GetParam(); + output_shapes = unit_test::ShapeVector(0); + output_shapes.push_back(exp_shape); + ASSERT_LE(input_shapes.size(), 2); + ASSERT_LE(data.size(), 2); + } + + op::v0::PriorBoxClustered::Attributes attrs; + std::vector> data; + unit_test::ShapeVector input_shapes; +}; + +namespace prior_box_cluster { +const op::v0::PriorBoxClustered::Attributes createAttrs( + std::vector widths, + std::vector heights, + bool clip, + float step_widths, + float step_heights, + float step, + float offset, + std::vector variances); + +const op::v0::PriorBoxClustered::Attributes 
createAttrs( + std::vector widths, + std::vector heights, + bool clip, + float step_widths, + float step_heights, + float step, + float offset, + std::vector variances) { + op::v0::PriorBoxClustered::Attributes attrs; + attrs.widths = widths; + attrs.heights = heights; + attrs.clip = clip; + attrs.step_widths = step_widths; + attrs.step_heights = step_heights; + attrs.offset = offset; + attrs.variances = variances; + return attrs; +} + +const op::v0::PriorBoxClustered::Attributes attrs1 = createAttrs( + {2.0f, 3.0f} , // widths Desired widths of prior boxes + {1.5f, 2.0f}, // heights Desired heights of prior boxes + true, // clip Clip output to [0, 1] + 0.0f, // step_widths Distance between prior box centers + 0.0f, // step_heights Distance between prior box centers + 0.0f, // step Distance between prior box centers (when step_w = step_h) + 0.0f, // offset Box offset relative to top center of image + {} // variances Values to adjust prior boxes with +); + +const op::v0::PriorBoxClustered::Attributes attrs2 = createAttrs( + {86.0f, 13.0f, 57.0f, 39.0f, 68.0f, 34.0f, 142.0f, 50.0f, 23.0f}, // widths Desired widths of prior boxes + {44.0f, 10.0f, 30.0f, 19.0f, 94.0f, 32.0f, 61.0f, 53.0f, 17.0f}, // heights Desired heights of prior boxes + false, // clip Clip output to [0, 1] + 0.0f, // step_widths Distance between prior box centers + 0.0f, // step_heights Distance between prior box centers + 16.0f, // step Distance between prior box centers (when step_w = step_h) + 0.5f, // offset Box offset relative to top center of image + {0.1f, 0.1f, 0.2f, 0.2f} // variances Values to adjust prior boxes with +); + +const op::v0::PriorBoxClustered::Attributes attrs3 = createAttrs( + {4.0f, 2.0f, 3.2f} , // widths Desired widths of prior boxes + {1.0f, 2.0f, 1.1f}, // heights Desired heights of prior boxes + true, // clip Clip output to [0, 1] + 0.0f, // step_widths Distance between prior box centers + 0.0f, // step_heights Distance between prior box centers + 0.0f, // step Distance between prior box centers (when step_w = step_h) + 0.0f, // offset Box offset relative to top center of image + {} // variances Values to adjust prior boxes with +); + +} // namespace prior_box_cluster + +TEST_P(PriorBoxClusteredV0CpuShapeInferenceTest , shape_inference_empty_const_map) { + const auto layer_const = std::make_shared(element::i32, ov::Shape{2}, data[0]); + std::shared_ptr op; + if (input_shapes.size() == 2) { + const auto image_const = std::make_shared(element::i32, ov::Shape{2}, data[1]); + op = make_op(layer_const, image_const, attrs); + } else { + const auto image_param = std::make_shared(element::i32, PartialShape::dynamic()); + op = make_op(layer_const, image_param, attrs); + } + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes); +} + +TEST_P(PriorBoxClusteredV0CpuShapeInferenceTest , shape_inference_with_const_map) { + const auto layer_shape = std::make_shared(element::i32, PartialShape::dynamic()); + const auto image_shape = std::make_shared(element::i32, PartialShape::dynamic()); + auto op = make_op(layer_shape, image_shape, attrs); + const auto layer_const = std::make_shared(element::i32, ov::Shape{2}, data[0]); + std::map const_data{{0, std::make_shared(layer_const)}}; + + if (input_shapes.size() == 2) { + const auto image_const = std::make_shared(element::i32, ov::Shape{2}, data[1]); + const_data.insert({1, std::make_shared(image_const)}); + } + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, const_data); +} + +INSTANTIATE_TEST_SUITE_P( + CpuShapeInfer, + 
PriorBoxClusteredV0CpuShapeInferenceTest , + Values(make_tuple(unit_test::ShapeVector{{2}}, prior_box_cluster::attrs1, + std::vector>{{2, 5}}, StaticShape({2, 80})), + make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box_cluster::attrs1, + std::vector>{{12, 16}, {50, 50}}, StaticShape({2, 1536})), + make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box_cluster::attrs2, + std::vector>{{10, 19}, {180, 300}}, StaticShape({2, 6840})), + make_tuple(unit_test::ShapeVector{{2}, {2}}, prior_box_cluster::attrs3, + std::vector>{{19, 19}, {300, 300}}, StaticShape({2, 4332}))), + PriorBoxClusteredV0CpuShapeInferenceTest::getTestCaseName); + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/reshape.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/reshape.cpp new file mode 100644 index 00000000000..111b7c36d72 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/reshape.cpp @@ -0,0 +1,137 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "common_test_utils/test_assertions.hpp" +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +using ReshapeTestParams = std::tuple, // reshape axes + StaticShape, // Expected shape + bool // specal zero + >; + +class ReshapeCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest, + public WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + unit_test::ShapeVector tmp_input_shapes; + std::vector tmp_axes; + StaticShape tmp_exp_shape; + bool tmp_specialZero; + std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape, tmp_specialZero) = obj.param; + std::ostringstream result; + result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_"; + result << "axes" << CommonTestUtils::vec2str(tmp_axes) << "_"; + result << "exp_shape(" << tmp_exp_shape << ")_"; + result << "specalZero(" << unit_test::boolToString(tmp_specialZero) << ")"; + return result.str(); + } + +protected: + void SetUp() override { + std::tie(input_shapes, axes, exp_shape, specalZero) = GetParam(); + output_shapes = unit_test::ShapeVector(0); + arg = std::make_shared(element::f32, input_shapes.front().get_shape()); + } + + std::vector axes; + std::shared_ptr arg; + bool specalZero; +}; + +TEST_P(ReshapeCpuShapeInferenceTest , shape_inference_empty_const_map) { + const auto axes_node = std::make_shared(element::i64, ov::Shape{axes.size()}, axes); + const auto op = make_op(arg, axes_node, specalZero); + output_shapes.push_back(exp_shape); + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes); +} + +TEST_P(ReshapeCpuShapeInferenceTest , shape_inference_with_const_map) { + const auto axes_node = std::make_shared(element::i64, PartialShape::dynamic()); + const auto op = make_op(arg, axes_node, specalZero); + + const auto axes_const = std::make_shared(element::i64, ov::Shape{axes.size()}, axes); + const auto axes_tensor = std::make_shared(axes_const); + const std::map& constant_data = {{1, axes_tensor}}; + + output_shapes.push_back(exp_shape); + unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data); +} + +INSTANTIATE_TEST_SUITE_P( + 
CpuShapeInfer, + ReshapeCpuShapeInferenceTest , + Values(make_tuple(unit_test::ShapeVector{{1, 2, 3, 1}, {2}}, std::vector{0, -1}, StaticShape({1, 6}), true), + make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector{1, 2, 3, 8}, StaticShape({1, 2, 3, 8}), true), + make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector{0, 2, 0, 8}, StaticShape({1, 2, 3, 8}), true), + make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector{0, -1}, StaticShape({0, 4}), true), + make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector{0, 4}, StaticShape({0, 4}), true), + make_tuple(unit_test::ShapeVector{{4, 0, 2}, {2}}, std::vector{-1, 0}, StaticShape({8, 0}), true), + make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector{1, 2, 3, 8}, StaticShape({1, 2, 3, 8}), false), + make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector{3, 0}, StaticShape({3, 0}), false), + make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector{4, 0}, StaticShape({4, 0}), false), + make_tuple(unit_test::ShapeVector{{3, 6, 5, 5}, {2}}, std::vector{0, -1}, StaticShape({3, 150}), true)), + ReshapeCpuShapeInferenceTest::getTestCaseName); + +using ReshapeCpuShapeInferenceThrowExceptionTest = ReshapeCpuShapeInferenceTest; + +TEST_P(ReshapeCpuShapeInferenceThrowExceptionTest, wrong_pattern) { + const auto axes_node = std::make_shared(element::i64, PartialShape::dynamic()); + const auto op = make_op(arg, axes_node, specalZero); + + const auto axes_const = std::make_shared(element::i64, ov::Shape{axes.size()}, axes); + const auto axes_tensor = std::make_shared(axes_const); + const std::map& constant_data = {{1, axes_tensor}}; + std::ostringstream os; + os << "[cpu]reshape: the shape of input data "; + os << "("; + for (size_t i = 0; i < input_shapes[0].size(); i++) { + os << input_shapes[0][i]; + if (i < input_shapes[0].size() - 1) { + os << "."; + } + } + os << ")"; + os << " conflicts with the reshape pattern "; + os << "("; + for (size_t i = 0; i < axes.size(); i++) { + os << axes[i]; + if (i < axes.size() - 1) { + os << "."; + } + } + os << ")"; + + OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data), + ov::Exception, + HasSubstr(os.str())); +} + +INSTANTIATE_TEST_SUITE_P( + CpuShapeInfer, + ReshapeCpuShapeInferenceThrowExceptionTest, + Values(make_tuple(unit_test::ShapeVector{{1, 2, 3, 1}, {3}}, std::vector{0, -1, -1}, StaticShape({}), true), + make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector{1, 2, 3, 6}, StaticShape({}), true), + make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector{0, 3, 0, 8}, StaticShape({}), true), + make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {4}}, std::vector{1, 2, 3, 6}, StaticShape({}), false), + make_tuple(unit_test::ShapeVector{{0, 2, 2}, {2}}, std::vector{3, 3}, StaticShape({}), false)), + ReshapeCpuShapeInferenceThrowExceptionTest::getTestCaseName); + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/shape_node.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/shape_node.cpp new file mode 100644 index 00000000000..6611b760b40 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/shape_node.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include 
"common_test_utils/test_assertions.hpp" +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +TEST(CpuShapeInfer, ShapeOf5DTest) { + auto data = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + + auto shapeof = + std::make_shared(data); + + std::vector static_input_shapes = {StaticShape{2, 3, 4, 5, 6}}, + static_output_shapes = {StaticShape{5}}; + unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes); +} + +TEST(CpuShapeInfer, v3ShapeOf5DTest) { + auto data = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + + auto shapeof = + std::make_shared(data); + + std::vector static_input_shapes = {StaticShape{2, 3, 4, 5, 6}}, + static_output_shapes = {StaticShape{5}}; + unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes); +} + +TEST(CpuShapeInfer, ShapeOf0DTest) { + GTEST_SKIP() << "Skipping test, please check CVS-108946"; + auto data = std::make_shared(element::f32, PartialShape{}); + + auto shapeof = + std::make_shared(data); + + std::vector static_input_shapes = {StaticShape{}}, + static_output_shapes = {StaticShape{}}; + // TODO , can't pass implementation don't support 0D shape input + unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes); +} + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov + diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/squeeze.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/squeeze.cpp new file mode 100644 index 00000000000..b4a1d4b3980 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/squeeze.cpp @@ -0,0 +1,138 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/test_assertions.hpp" +#include +#include "custom_shape_infer.hpp" +#include "openvino/op/ops.hpp" +namespace ov { +namespace intel_cpu { +namespace unit_test { +namespace cpu_shape_infer { + +using namespace ov; +using namespace ov::intel_cpu; +using namespace testing; + +using SqueezeTestParams = std::tuple, // Squeeze axes + StaticShape // Expected shape + >; + +class SqueezeCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest, + public WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + unit_test::ShapeVector tmp_input_shapes; + std::vector tmp_axes; + StaticShape tmp_exp_shape; + std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape) = obj.param; + std::ostringstream result; + result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_"; + result << "axes" << CommonTestUtils::vec2str(tmp_axes) << "_"; + result << "exp_shape(" << tmp_exp_shape << ")"; + return result.str(); + } + +protected: + void SetUp() override { + std::tie(input_shapes, axes, exp_shape) = GetParam(); + output_shapes = unit_test::ShapeVector(0); + output_shapes.push_back(exp_shape); + arg = std::make_shared(element::f32, input_shapes.front().get_shape()); + } + + std::vector axes; + std::shared_ptr arg; +}; + +TEST_P(SqueezeCpuShapeInferenceTest , shape_inference_empty_const_map) { + const auto axes_node = std::make_shared(element::i64, ov::Shape{axes.size()}, axes); + const auto op = make_op(arg, axes_node); + 
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
+}
+
+TEST_P(SqueezeCpuShapeInferenceTest, shape_inference_with_const_map) {
+    const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
+    const auto op = make_op(arg, axes_node);
+
+    const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
+    const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
+    const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
+
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    SqueezeCpuShapeInferenceTest,
+    Values(make_tuple(unit_test::ShapeVector{{1}, {1}}, std::vector<int64_t>{-1}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1}, {1}}, std::vector<int64_t>{0}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 3, 1}, {2}}, std::vector<int64_t>{0, 3}, StaticShape({2, 3})),
+           make_tuple(unit_test::ShapeVector{{2, 1, 1, 4}, {2}}, std::vector<int64_t>{2, 1}, StaticShape({2, 4})),
+           make_tuple(unit_test::ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector<int64_t>{0, 2, 4}, StaticShape({3, 2})),
+           make_tuple(unit_test::ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector<int64_t>{4, 2, 0}, StaticShape({3, 2})),
+           make_tuple(unit_test::ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector<int64_t>{2, 0, 4}, StaticShape({3, 2})),
+           make_tuple(unit_test::ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {4}},
+                      std::vector<int64_t>{1, -1, 3, -2},
+                      StaticShape({10, 0, 3})),
+           make_tuple(unit_test::ShapeVector{{10, 1, 0, 1, 3, 1, 1}}, std::vector<int64_t>{}, StaticShape({10, 0, 3})),
+           make_tuple(unit_test::ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {}}, std::vector<int64_t>{}, StaticShape({10, 0, 3})),
+           make_tuple(unit_test::ShapeVector{{2, 1, 7, 8, 3}, {1}}, std::vector<int64_t>{1}, StaticShape({2, 7, 8, 3})),
+           make_tuple(unit_test::ShapeVector{{2, 1, 3}, {2}}, std::vector<int64_t>{1, 1}, StaticShape({2, 3})),
+           make_tuple(unit_test::ShapeVector{{3, 1, 2, 1}, {3}}, std::vector<int64_t>{1, -1, 1}, StaticShape({3, 2})),
+           make_tuple(unit_test::ShapeVector{{3, 1, 2, 1}, {4}}, std::vector<int64_t>{1, -1, 1, -1}, StaticShape({3, 2})),
+           make_tuple(unit_test::ShapeVector{{1, 3, 1, 2, 1}, {5}}, std::vector<int64_t>{2, -1, 2, -1, 0}, StaticShape({3, 2})),
+           make_tuple(unit_test::ShapeVector{{2, 6, 7, 8, 1}, {2}}, std::vector<int64_t>{-1, -1}, StaticShape({2, 6, 7, 8}))),
+    SqueezeCpuShapeInferenceTest::getTestCaseName);
+
+using SqueezeCpuShapeInferenceThrowExceptionTest = SqueezeCpuShapeInferenceTest;
+TEST_P(SqueezeCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
+    const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
+    const auto op = make_op(arg, axes_node);
+
+    const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
+    const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
+    const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
+    std::ostringstream os;
+    os << "[cpu]squeeze: the shape of input data ";
+    os << "(";
+    for (size_t i = 0; i < input_shapes[0].size(); i++) {
+        os << input_shapes[0][i];
+        if (i < input_shapes[0].size() - 1) {
+            os << ".";
+        }
+    }
+    os << ")";
+    os << " conflicts with the squeeze pattern ";
+    os << "(";
+    for (size_t i = 0; i < axes.size(); i++) {
+        os << axes[i];
+        if (i < axes.size() - 1) {
+            os << ".";
+        }
+    }
+    os << ")";
+
+    OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data),
+                    ov::Exception,
+                    HasSubstr(os.str()));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    SqueezeCpuShapeInferenceThrowExceptionTest,
+    Values(make_tuple(unit_test::ShapeVector{{1, 2, 3, 1}, {1}},
+           std::vector<int64_t>{1}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {1}}, std::vector<int64_t>{2}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {2}}, std::vector<int64_t>{1, 2}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {1}}, std::vector<int64_t>{-1}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {2}}, std::vector<int64_t>{-1, -1}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 3, 8}, {2}}, std::vector<int64_t>{-1, -2}, StaticShape({}))),
+    SqueezeCpuShapeInferenceThrowExceptionTest::getTestCaseName);
+
+} // namespace cpu_shape_infer
+} // namespace unit_test
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/strided_slice.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/strided_slice.cpp
new file mode 100644
index 00000000000..8ea01dabead
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/strided_slice.cpp
@@ -0,0 +1,136 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "custom_shape_infer.hpp"
+#include <gmock/gmock.h>
+#include "openvino/op/ops.hpp"
+namespace ov {
+namespace intel_cpu {
+namespace unit_test {
+namespace cpu_shape_infer {
+
+using namespace ov;
+using namespace ov::intel_cpu;
+using namespace testing;
+
+using StridedSliceParams = std::tuple<unit_test::ShapeVector,             // Input shapes
+                                      std::vector<std::vector<int32_t>>,  // data{begin,end,stride}
+                                      std::vector<int64_t>,               // begin_mask
+                                      std::vector<int64_t>,               // end_mask
+                                      StaticShape                         // Expected shape
+                                      >;
+
+class StridedSliceCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v1::StridedSlice>,
+                                          public WithParamInterface<StridedSliceParams> {
+public:
+    enum DATA_INDEX {
+        BEGIN = 0,
+        END = 1,
+        STRIDE = 2,
+    };
+    static std::string getTestCaseName(const testing::TestParamInfo<StridedSliceParams>& obj) {
+        unit_test::ShapeVector tmp_input_shapes;
+        std::vector<std::vector<int32_t>> tmp_data;
+        std::vector<int64_t> tmp_begin_mask;
+        std::vector<int64_t> tmp_end_mask;
+        StaticShape tmp_exp_shape;
+        std::tie(tmp_input_shapes, tmp_data, tmp_begin_mask, tmp_end_mask, tmp_exp_shape) = obj.param;
+        std::ostringstream result;
+        result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
+        result << "begin" << CommonTestUtils::vec2str(tmp_data[BEGIN]) << "_";
+        result << "end" << CommonTestUtils::vec2str(tmp_data[END]) << "_";
+        result << "stride" << CommonTestUtils::vec2str(tmp_data[STRIDE]) << "_";
+        result << "begin_mask" << CommonTestUtils::vec2str(tmp_begin_mask) << "_";
+        result << "end_mask" << CommonTestUtils::vec2str(tmp_end_mask) << "_";
+        result << "exp_shape(" << tmp_exp_shape << ")";
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        std::tie(input_shapes, data, begin_mask, end_mask, exp_shape) = GetParam();
+        output_shapes = unit_test::ShapeVector(0);
+        output_shapes.push_back(exp_shape);
+        ASSERT_EQ(input_shapes.size(), 4);
+        arg = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
+    }
+    std::vector<std::vector<int32_t>> data;
+    std::vector<int64_t> begin_mask;
+    std::vector<int64_t> end_mask;
+    std::shared_ptr<op::v0::Parameter> arg;
+};
+
+TEST_P(StridedSliceCpuShapeInferenceTest, shape_inference_empty_const_map) {
+    const auto begin = op::v0::Constant::create(element::i32, input_shapes[1].get_shape(), data[BEGIN]);
+    const auto end = op::v0::Constant::create(element::i32, input_shapes[2].get_shape(), data[END]);
+    const auto stride = op::v0::Constant::create(element::i32, input_shapes[3].get_shape(), data[STRIDE]);
+    const auto op = make_op(arg, begin, end, stride, begin_mask, end_mask);
+    // implementation depends on some output information of the op
+    op->set_output_type(0, element::i32, {-1, -1, -1});
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
+}
+
+TEST_P(StridedSliceCpuShapeInferenceTest, shape_inference_in_const_map) {
+    const auto begin = std::make_shared<op::v0::Parameter>(element::i32, input_shapes[1].get_shape());
+    const auto end = std::make_shared<op::v0::Parameter>(element::i32, input_shapes[2].get_shape());
+    const auto stride = std::make_shared<op::v0::Parameter>(element::i32, input_shapes[3].get_shape());
+    const auto op = make_op(arg, begin, end, stride, begin_mask, end_mask);
+
+    const auto begin_const = std::make_shared<op::v0::Constant>(element::i32, input_shapes[1].get_shape(), data[BEGIN]);
+    const auto end_const = std::make_shared<op::v0::Constant>(element::i32, input_shapes[2].get_shape(), data[END]);
+    const auto stride_const = std::make_shared<op::v0::Constant>(element::i32, input_shapes[3].get_shape(), data[STRIDE]);
+    const auto begin_tensor = std::make_shared<ov::HostTensor>(begin_const);
+    const auto end_tensor = std::make_shared<ov::HostTensor>(end_const);
+    const auto stride_tensor = std::make_shared<ov::HostTensor>(stride_const);
+    const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, begin_tensor},
+                                                                {2, end_tensor},
+                                                                {3, stride_tensor}};
+    // implementation depends on some output information of the op
+    op->set_output_type(0, element::i32, {-1, -1, -1});
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    StridedSliceCpuShapeInferenceTest,
+    Values(make_tuple(unit_test::ShapeVector{{3, 4, 5}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{100}, {-100}, {-1}},
+                      std::vector<int64_t>(4, 0), std::vector<int64_t>(4, 0), StaticShape({3, 4, 5})),
+           make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{1, 0, 0}, {2, 1, 3}, {1, 1, 1}},
+                      std::vector<int64_t>(4, 0), std::vector<int64_t>(4, 0), StaticShape({1, 1, 3})),
+           make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{1, 0, 0}, {2, 2, 3}, {1, 1, 1}},
+                      std::vector<int64_t>(4, 0), std::vector<int64_t>(4, 0), StaticShape({1, 2, 3})),
+           make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{2, 0, 0}, {3, 2, 3}, {1, 1, 2}},
+                      std::vector<int64_t>(4, 0), std::vector<int64_t>(4, 0), StaticShape({1, 2, 2})),
+           make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{1, 0, 0}, {0, 0, 0}, {1, 1, 1}},
+                      std::vector<int64_t>{0, 1, 1}, std::vector<int64_t>(3, 1), StaticShape({2, 2, 3})),
+           make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{0, 1, 0}, {2, 0, 0}, {1, 1, 2}},
+                      std::vector<int64_t>{1, 0, 1}, std::vector<int64_t>{0, 1, 1}, StaticShape({2, 1, 2}))),
+    // TODO 108946: can't pass yet;
+    // make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{0, 0, 0}, {1, 0, 0}, {1, 1, -1}},
+    //            std::vector<int64_t>{0, 1, 1}, std::vector<int64_t>{0, 1, 1}, StaticShape({1, 1, 3}))),
+    StridedSliceCpuShapeInferenceTest::getTestCaseName);
+
+TEST(CpuShapeInfer, StridedSliceDefault_stride) {
+    GTEST_SKIP() << "Skipping test, please check CVS-108946";
+    const auto mask = std::vector<int64_t>{0, 1, 0};
+
+    const auto data = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
+    // only supports i32
+    const auto begin = op::v0::Constant::create(element::i32, ov::Shape{3}, {0, 0, 0});
+    const auto end = op::v0::Constant::create(element::i32, ov::Shape{3}, {1, 0, 2});
+    const auto op = std::make_shared<op::v1::StridedSlice>(data, begin, end, mask, mask);
+
+    std::vector<StaticShape> static_input_shapes = {{3, 2, 3}, {3}, {3}};
+    std::vector<StaticShape> static_output_shapes = {StaticShape{1, 2, 2}};
+    // implementation depends on some output information of the op
+    op->set_output_type(0, element::i32, {-1, -1, -1});
+    // TODO 108946: there is some issue in the implementation, this test case can't pass yet
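+    // For reference (assuming an omitted stride input defaults to 1 along every axis):
+    // axis 0 slices [0, 1) -> 1, axis 1 is kept whole by the mask -> 2, and axis 2
+    // slices [0, 2) -> 2, which is how the expected {1, 2, 2} is derived.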
+    unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
+}
+} // namespace cpu_shape_infer
+} // namespace unit_test
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/transpose.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/transpose.cpp
new file mode 100644
index 00000000000..862591844e6
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/transpose.cpp
@@ -0,0 +1,102 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include "common_test_utils/test_assertions.hpp"
+#include <gtest/gtest.h>
+#include "custom_shape_infer.hpp"
+#include "openvino/op/ops.hpp"
+namespace ov {
+namespace intel_cpu {
+namespace unit_test {
+namespace cpu_shape_infer {
+
+using namespace ov;
+using namespace ov::intel_cpu;
+using namespace testing;
+
+using transpose_params = std::tuple<unit_test::ShapeVector,  // Input shapes
+                                    std::vector<size_t>,     // transpose order
+                                    StaticShape              // Expected shape
+                                    >;
+
+class TransposeCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v1::Transpose>,
+                                       public WithParamInterface<transpose_params> {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<transpose_params>& obj) {
+        unit_test::ShapeVector tmp_input_shapes;
+        std::vector<size_t> tmp_transpose_order;
+        StaticShape tmp_exp_shape;
+        std::tie(tmp_input_shapes, tmp_transpose_order, tmp_exp_shape) = obj.param;
+        std::ostringstream result;
+        result << "input_shapes(" << CommonTestUtils::vec2str(tmp_input_shapes) << ")_";
+        result << "order(" << CommonTestUtils::vec2str(tmp_transpose_order) << ")_";
+        result << "exp_shape(" << tmp_exp_shape << ")";
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        std::tie(input_shapes, transpose_order, exp_shape) = GetParam();
+        output_shapes = unit_test::ShapeVector(0);
+        output_shapes.push_back(exp_shape);
+        ASSERT_EQ(input_shapes.size(), 2);
+        arg = std::make_shared<op::v0::Parameter>(element::f32, input_shapes.front().get_shape());
+    }
+
+    std::vector<size_t> transpose_order;
+    std::shared_ptr<op::v0::Parameter> arg;
+};
+
+TEST_P(TransposeCpuShapeInferenceTest, shape_inference_empty_const_map) {
+    const auto order =
+        std::make_shared<op::v0::Constant>(element::i64, ov::Shape{transpose_order.size()}, transpose_order);
+    auto op = make_op(arg, order);
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
+}
+
+/** \brief Use transpose order -> output shape dimensions shall follow the transpose order.
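+ *  Assumed from the cases below: an empty order (second input of shape {0}) reverses the input dimensions.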
+ */
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    TransposeCpuShapeInferenceTest,
+    Values(make_tuple(unit_test::ShapeVector{{3}, {1}}, std::vector<size_t>{0}, StaticShape({3})),
+           make_tuple(unit_test::ShapeVector{{5, 2}, {2}}, std::vector<size_t>{0, 1}, StaticShape({5, 2})),
+           make_tuple(unit_test::ShapeVector{{8, 3}, {2}}, std::vector<size_t>{1, 0}, StaticShape({3, 8})),
+           make_tuple(unit_test::ShapeVector{{1, 0, 2}, {3}}, std::vector<size_t>{2, 0, 1}, StaticShape({2, 1, 0})),
+           make_tuple(unit_test::ShapeVector{{10, 8, 9, 2}, {4}}, std::vector<size_t>{2, 0, 3, 1}, StaticShape({9, 10, 2, 8})),
+           make_tuple(unit_test::ShapeVector{{1, 2, 3, 4}, {4}}, std::vector<size_t>{1, 3, 2, 0}, StaticShape({2, 4, 3, 1})),
+           make_tuple(unit_test::ShapeVector{{1}, {0}}, std::vector<size_t>{}, StaticShape({1})),
+           make_tuple(unit_test::ShapeVector{{23}, {0}}, std::vector<size_t>{}, StaticShape({23})),
+           make_tuple(unit_test::ShapeVector{{3, 8}, {0}}, std::vector<size_t>{}, StaticShape({8, 3})),
+           make_tuple(unit_test::ShapeVector{{1, 0, 2}, {0}}, std::vector<size_t>{}, StaticShape({2, 0, 1})),
+           make_tuple(unit_test::ShapeVector{{21, 1, 5, 9}, {0}}, std::vector<size_t>{}, StaticShape({9, 5, 1, 21})),
+           make_tuple(unit_test::ShapeVector{{0, 0, 0}, {0}}, std::vector<size_t>{}, StaticShape({0, 0, 0})),
+           make_tuple(unit_test::ShapeVector{{0, 2, 0}, {0}}, std::vector<size_t>{}, StaticShape({0, 2, 0})),
+           make_tuple(unit_test::ShapeVector{{0, 2, 0, 0}, {0}}, std::vector<size_t>{}, StaticShape({0, 0, 2, 0}))),
+    TransposeCpuShapeInferenceTest::getTestCaseName);
+
+using TransposeCpuShapeInferenceThrowExceptionTest = TransposeCpuShapeInferenceTest;
+TEST_P(TransposeCpuShapeInferenceThrowExceptionTest, shape_inference_in_const_map) {
+    const auto order = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
+    auto op = make_op(arg, order);
+
+    const auto axes = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{transpose_order.size()}, transpose_order);
+    const auto const_tensor = std::make_shared<ov::HostTensor>(axes);
+    const std::map<size_t, ov::HostTensorPtr> const_map = {{1, const_tensor}};
+
+    OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, const_map),
+                    ov::Exception,
+                    HasSubstr("TODO: Support parameterized Order input for dynamic shapes."));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    TransposeCpuShapeInferenceThrowExceptionTest,
+    Values(make_tuple(unit_test::ShapeVector{{3}, {1}}, std::vector<size_t>{0}, StaticShape({3})),
+           make_tuple(unit_test::ShapeVector{{1}, {0}}, std::vector<size_t>{}, StaticShape({1}))),
+    TransposeCpuShapeInferenceThrowExceptionTest::getTestCaseName);
+
+} // namespace cpu_shape_infer
+} // namespace unit_test
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/unsqueeze.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/unsqueeze.cpp
new file mode 100644
index 00000000000..a01e53a0c66
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/unsqueeze.cpp
@@ -0,0 +1,138 @@
+// Copyright (C) 2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_test_utils/test_assertions.hpp"
+#include <gtest/gtest.h>
+#include "custom_shape_infer.hpp"
+#include "openvino/op/ops.hpp"
+namespace ov {
+namespace intel_cpu {
+namespace unit_test {
+namespace cpu_shape_infer {
+
+using namespace ov;
+using namespace ov::intel_cpu;
+using namespace testing;
+using UnsqueezeTestParams = std::tuple<unit_test::ShapeVector,  // Input shapes
+                                       std::vector<int64_t>,    // Unsqueeze axes
+                                       StaticShape              // Expected shape
+                                       >;
+
+class UnsqueezeCpuShapeInferenceTest : public unit_test::OpCpuShapeInferenceTest<op::v0::Unsqueeze>,
+                                       public WithParamInterface<UnsqueezeTestParams> {
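+    // Same parameterized layout as the squeeze suite: input shapes, the unsqueeze
+    // axes, and the expected static output shape.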
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<UnsqueezeTestParams>& obj) {
+        unit_test::ShapeVector tmp_input_shapes;
+        std::vector<int64_t> tmp_axes;
+        StaticShape tmp_exp_shape;
+        std::tie(tmp_input_shapes, tmp_axes, tmp_exp_shape) = obj.param;
+        std::ostringstream result;
+        result << "IS" << CommonTestUtils::vec2str(tmp_input_shapes) << "_";
+        result << "axes" << CommonTestUtils::vec2str(tmp_axes) << "_";
+        result << "exp_shape(" << tmp_exp_shape << ")";
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        std::tie(input_shapes, axes, exp_shape) = GetParam();
+
+        output_shapes = unit_test::ShapeVector(0);
+        arg = std::make_shared<op::v0::Parameter>(element::f32, input_shapes.front().get_shape());
+    }
+
+    std::vector<int64_t> axes;
+    std::shared_ptr<op::v0::Parameter> arg;
+};
+
+TEST_P(UnsqueezeCpuShapeInferenceTest, shape_inference_empty_const_map) {
+    const auto axes_node = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
+    op = std::make_shared<op::v0::Unsqueeze>(arg, axes_node);
+    output_shapes.push_back(exp_shape);
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
+}
+
+TEST_P(UnsqueezeCpuShapeInferenceTest, shape_inference_with_const_map) {
+    const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
+    op = std::make_shared<op::v0::Unsqueeze>(arg, axes_node);
+
+    const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
+    const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
+    const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
+    output_shapes.push_back(exp_shape);
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    UnsqueezeCpuShapeInferenceTest,
+    Values(make_tuple(unit_test::ShapeVector{{0}, {1}}, std::vector<int64_t>{-1}, StaticShape({0, 1})),
+           make_tuple(unit_test::ShapeVector{{0}, {1}}, std::vector<int64_t>{0}, StaticShape({1, 0})),
+           make_tuple(unit_test::ShapeVector{{1}, {1}}, std::vector<int64_t>{1}, StaticShape({1, 1})),
+           make_tuple(unit_test::ShapeVector{{2}, {1}}, std::vector<int64_t>{0}, StaticShape({1, 2})),
+           make_tuple(unit_test::ShapeVector{{2}, {1}}, std::vector<int64_t>{1}, StaticShape({2, 1})),
+           make_tuple(unit_test::ShapeVector{{2}, {1}}, std::vector<int64_t>{-1}, StaticShape({2, 1})),
+           make_tuple(unit_test::ShapeVector{{2}, {1}}, std::vector<int64_t>{-2}, StaticShape({1, 2})),
+           make_tuple(unit_test::ShapeVector{{2, 3}, {2}}, std::vector<int64_t>{0, 3}, StaticShape({1, 2, 3, 1})),
+           make_tuple(unit_test::ShapeVector{{2, 4}, {2}}, std::vector<int64_t>{2, 1}, StaticShape({2, 1, 1, 4})),
+           make_tuple(unit_test::ShapeVector{{3, 2}, {3}}, std::vector<int64_t>{0, 2, 4}, StaticShape({1, 3, 1, 2, 1})),
+           make_tuple(unit_test::ShapeVector{{3, 2}, {3}}, std::vector<int64_t>{4, 2, 0}, StaticShape({1, 3, 1, 2, 1})),
+           make_tuple(unit_test::ShapeVector{{3, 2}, {3}}, std::vector<int64_t>{2, 0, 4}, StaticShape({1, 3, 1, 2, 1})),
+           make_tuple(unit_test::ShapeVector{{10, 0, 3}, {4}},
+                      std::vector<int64_t>{1, -1, 3, -2},
+                      StaticShape({10, 1, 0, 1, 3, 1, 1})),
+           make_tuple(unit_test::ShapeVector{{2, 6, 7, 8, 3}, {1}}, std::vector<int64_t>{0}, StaticShape({1, 2, 6, 7, 8, 3})),
+           make_tuple(unit_test::ShapeVector{{2, 3}, {2}}, std::vector<int64_t>{1, 1}, StaticShape({2, 1, 3})),
+           make_tuple(unit_test::ShapeVector{{3, 2}, {3}}, std::vector<int64_t>{1, -1, 1}, StaticShape({3, 1, 2, 1})),
+           make_tuple(unit_test::ShapeVector{{3, 2}, {4}}, std::vector<int64_t>{1, -1, 1, -1}, StaticShape({3, 1, 2, 1})),
+           make_tuple(unit_test::ShapeVector{{3, 2}, {5}}, std::vector<int64_t>{2, -1, 2, -1, 0}, StaticShape({1, 3, 1, 2, 1})),
+           make_tuple(unit_test::ShapeVector{{2, 6, 7, 8, 3}, {2}}, std::vector<int64_t>{-1, -1}, StaticShape({2, 6, 7, 8, 3, 1}))),
+    UnsqueezeCpuShapeInferenceTest::getTestCaseName);
+
+using UnsqueezeCpuShapeInferenceThrowExceptionTest = UnsqueezeCpuShapeInferenceTest;
+TEST_P(UnsqueezeCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
+    const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
+    const auto op = make_op(arg, axes_node);
+
+    const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
+    const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
+    const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
+    std::ostringstream os;
+    os << "[cpu]unsqueeze: the shape of input data ";
+    os << "(";
+    for (size_t i = 0; i < input_shapes[0].size(); i++) {
+        os << input_shapes[0][i];
+        if (i < input_shapes[0].size() - 1) {
+            os << ".";
+        }
+    }
+    os << ")";
+    os << " conflicts with the unsqueeze pattern ";
+    os << "(";
+    for (size_t i = 0; i < axes.size(); i++) {
+        os << axes[i];
+        if (i < axes.size() - 1) {
+            os << ".";
+        }
+    }
+    os << ")";
+
+    OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data),
+                    ov::Exception,
+                    HasSubstr(os.str()));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    CpuShapeInfer,
+    UnsqueezeCpuShapeInferenceThrowExceptionTest,
+    Values(make_tuple(unit_test::ShapeVector{{1, 2}, {1}}, std::vector<int64_t>{3}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2}, {2}}, std::vector<int64_t>{3, -1}, StaticShape({})),
+           make_tuple(unit_test::ShapeVector{{1, 2}, {1}}, std::vector<int64_t>{-4}, StaticShape({}))),
+    UnsqueezeCpuShapeInferenceThrowExceptionTest::getTestCaseName);
+
+} // namespace cpu_shape_infer
+} // namespace unit_test
+} // namespace intel_cpu
+} // namespace ov
+
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp
index 3ec26de7375..f3cf457802b 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp
@@ -9,7 +9,7 @@
 #include "openvino/op/ops.hpp"
 #include "openvino/util/common_util.hpp"
 #include "utils.hpp"
-#include "utils/shape_inference/shape_inference.hpp"
+#include "shape_inference/shape_inference.hpp"

 using namespace ov;
 using namespace ov::intel_cpu;
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp
index 511f77d4bb6..40eb4bf2d3d 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp
@@ -8,7 +8,7 @@
 #include
 #include
 #include
-#include <utils/shape_inference/shape_inference.hpp>
+#include <shape_inference/shape_inference.hpp>

 #include "utils.hpp"
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/make_shape_inference.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/make_shape_inference.cpp
index f79ce70d1cb..c6f9e315b63 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/make_shape_inference.cpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/make_shape_inference.cpp
@@ -6,8 +6,8 @@
 #include
 #include
 #include
-#include <utils/shape_inference/shape_inference.hpp>
-#include <utils/shape_inference/static_shape.hpp>
+#include <shape_inference/shape_inference.hpp>
+#include <shape_inference/static_shape.hpp>
 #include "ngraph_functions/builders.hpp"
 #include
 #include
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/static_shape_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/static_shape_test.cpp
index e82cb120fb9..b41e6973812 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/static_shape_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/static_shape_test.cpp
@@ -5,7 +5,7 @@
 #include

 #include "common_test_utils/test_assertions.hpp"
-#include "utils/shape_inference/static_shape.hpp"
+#include "shape_inference/static_shape.hpp"

 using namespace testing;
 using namespace ov::intel_cpu;
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp
index d32a592f119..b905d641d01 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp
@@ -6,8 +6,8 @@
 #include "openvino/op/parameter.hpp"
 #include "openvino/op/transpose.hpp"
 #include "transpose_shape_inference.hpp"
+#include "shape_inference/static_shape.hpp"
 #include "utils.hpp"
-#include "utils/shape_inference/static_shape.hpp"

 using namespace ov;
 using namespace ov::intel_cpu;
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp
index 23afd192ec0..a7cf2cbd62c 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp
@@ -3,13 +3,12 @@
 //
 #pragma once
-
 #include
 #include "openvino/op/ops.hpp"
 #include "openvino/op/parameter.hpp"
-#include "utils/shape_inference/shape_inference.hpp"
-#include "utils/shape_inference/static_shape.hpp"
+#include "shape_inference/shape_inference.hpp"
+#include "shape_inference/static_shape.hpp"

 using ShapeVector = std::vector<StaticShape>;

 namespace ov {